Column           Dtype / summary          Range
repo_name        stringclasses            6 values
pr_number        int64                    512 to 78.9k
pr_title         stringlengths            3 to 144
pr_description   stringlengths            0 to 30.3k
author           stringlengths            2 to 21
date_created     timestamp[ns, tz=UTC]
date_merged      timestamp[ns, tz=UTC]
previous_commit  stringlengths            40 to 40
pr_commit        stringlengths            40 to 40
query            stringlengths            17 to 30.4k
filepath         stringlengths            9 to 210
before_content   stringlengths            0 to 112M
after_content    stringlengths            0 to 112M
label            int64                    -1 to 1
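The column statistics above look like Hugging Face dataset-viewer output (distinct-value counts for categorical strings, length ranges for free-text strings). The snippet below is a minimal sketch of loading and inspecting one row with the `datasets` library; the dataset identifier is a hypothetical placeholder, and the comments about `query` and `label` are inferences from the rows shown below rather than documented semantics.

```python
# Minimal sketch, assuming the data is published as a Hugging Face dataset.
# "org/pr-file-relevance" is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset("org/pr-file-relevance", split="train")  # placeholder id and split

row = ds[0]
print(row["repo_name"], row["pr_number"], row["filepath"], row["label"])

# In the rows below, `query` is the PR title followed by its description, and
# label -1 coincides with before_content == after_content (file untouched by the PR).
print(row["query"][:200])
print("file changed by this PR:", row["before_content"] != row["after_content"])
```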
repo_name: dotnet/runtime
pr_number: 66,211
pr_title: [mono] Remove SkipVerification support from the runtime
pr_description: CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
author: akoeplinger
date_created: 2022-03-04T19:47:04Z
date_merged: 2022-03-06T13:44:33Z
previous_commit: b463b1630dbf1be5b013208a9fa73e1ecd6c774c
pr_commit: be629f49a350d526de2c65981294734cee420b90
query: [mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
filepath: ./src/tests/JIT/HardwareIntrinsics/General/Vector64/Dot.SByte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void DotSByte() { var test = new VectorBinaryOpTest__DotSByte(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__DotSByte { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private GCHandle inHandle1; private GCHandle inHandle2; private ulong alignment; public DataTable(SByte[] inArray1, SByte[] inArray2, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<SByte>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<SByte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<SByte> _fld1; public Vector64<SByte> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref testStruct._fld1), ref 
Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__DotSByte testClass) { var result = Vector64.Dot(_fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, result); } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte); private static SByte[] _data1 = new SByte[Op1ElementCount]; private static SByte[] _data2 = new SByte[Op2ElementCount]; private static Vector64<SByte> _clsVar1; private static Vector64<SByte> _clsVar2; private Vector64<SByte> _fld1; private Vector64<SByte> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__DotSByte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _clsVar1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); } public VectorBinaryOpTest__DotSByte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new DataTable(_data1, _data2, LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector64.Dot( Unsafe.Read<Vector64<SByte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<SByte>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector64).GetMethod(nameof(Vector64.Dot), new Type[] { typeof(Vector64<SByte>), typeof(Vector64<SByte>) }); if (method is null) { method = typeof(Vector64).GetMethod(nameof(Vector64.Dot), 1, new Type[] { typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(SByte)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector64<SByte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<SByte>>(_dataTable.inArray2Ptr) }); 
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (SByte)(result)); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector64.Dot( _clsVar1, _clsVar2 ); ValidateResult(_clsVar1, _clsVar2, result); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<SByte>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<SByte>>(_dataTable.inArray2Ptr); var result = Vector64.Dot(op1, op2); ValidateResult(op1, op2, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__DotSByte(); var result = Vector64.Dot(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector64.Dot(_fld1, _fld2); ValidateResult(_fld1, _fld2, result); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector64.Dot(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector64<SByte> op1, Vector64<SByte> op2, SByte result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(void* op1, void* op2, SByte result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<SByte>>()); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(SByte[] left, SByte[] right, SByte result, [CallerMemberName] string method = "") { bool succeeded = true; SByte actualResult = default; SByte intermResult = default; for (var i = 0; i < Op1ElementCount; i++) { if ((i % Vector128<SByte>.Count) == 0) { actualResult += intermResult; intermResult = default; } intermResult += (SByte)(left[i] * right[i]); } actualResult += intermResult; if (actualResult != result) { succeeded = false; } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.{nameof(Vector64.Dot)}<SByte>(Vector64<SByte>, Vector64<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: {result}"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void DotSByte() { var test = new VectorBinaryOpTest__DotSByte(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__DotSByte { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private GCHandle inHandle1; private GCHandle inHandle2; private ulong alignment; public DataTable(SByte[] inArray1, SByte[] inArray2, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<SByte>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<SByte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<SByte> _fld1; public Vector64<SByte> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref testStruct._fld1), ref 
Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__DotSByte testClass) { var result = Vector64.Dot(_fld1, _fld2); testClass.ValidateResult(_fld1, _fld2, result); } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<SByte>>() / sizeof(SByte); private static SByte[] _data1 = new SByte[Op1ElementCount]; private static SByte[] _data2 = new SByte[Op2ElementCount]; private static Vector64<SByte> _clsVar1; private static Vector64<SByte> _clsVar2; private Vector64<SByte> _fld1; private Vector64<SByte> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__DotSByte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _clsVar1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); } public VectorBinaryOpTest__DotSByte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _fld1), ref Unsafe.As<SByte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSByte(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new DataTable(_data1, _data2, LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector64.Dot( Unsafe.Read<Vector64<SByte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<SByte>>(_dataTable.inArray2Ptr) ); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector64).GetMethod(nameof(Vector64.Dot), new Type[] { typeof(Vector64<SByte>), typeof(Vector64<SByte>) }); if (method is null) { method = typeof(Vector64).GetMethod(nameof(Vector64.Dot), 1, new Type[] { typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(SByte)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector64<SByte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector64<SByte>>(_dataTable.inArray2Ptr) }); 
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, (SByte)(result)); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector64.Dot( _clsVar1, _clsVar2 ); ValidateResult(_clsVar1, _clsVar2, result); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<SByte>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector64<SByte>>(_dataTable.inArray2Ptr); var result = Vector64.Dot(op1, op2); ValidateResult(op1, op2, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__DotSByte(); var result = Vector64.Dot(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector64.Dot(_fld1, _fld2); ValidateResult(_fld1, _fld2, result); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector64.Dot(test._fld1, test._fld2); ValidateResult(test._fld1, test._fld2, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector64<SByte> op1, Vector64<SByte> op2, SByte result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(void* op1, void* op2, SByte result, [CallerMemberName] string method = "") { SByte[] inArray1 = new SByte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<SByte>>()); ValidateResult(inArray1, inArray2, result, method); } private void ValidateResult(SByte[] left, SByte[] right, SByte result, [CallerMemberName] string method = "") { bool succeeded = true; SByte actualResult = default; SByte intermResult = default; for (var i = 0; i < Op1ElementCount; i++) { if ((i % Vector128<SByte>.Count) == 0) { actualResult += intermResult; intermResult = default; } intermResult += (SByte)(left[i] * right[i]); } actualResult += intermResult; if (actualResult != result) { succeeded = false; } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.{nameof(Vector64.Dot)}<SByte>(Vector64<SByte>, Vector64<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: {result}"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
label: -1
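The `label` column is not documented in this dump; in the rows shown, -1 appears only where before_content equals after_content, so a plausible reading is that 1 marks files the PR actually modified and -1 marks files it left untouched. The sketch below sanity-checks that reading for the record above by diffing its two recorded commits; it assumes a local clone of dotnet/runtime and that interpretation of the label.

```python
# Hedged sketch: check whether a row's filepath differs between previous_commit and
# pr_commit, assuming label 1 <=> "file modified by the PR". Needs a local git clone.
import subprocess

def file_changed(repo_dir: str, base_sha: str, head_sha: str, path: str) -> bool:
    # `git diff --name-only <base> <head>` lists paths that differ between two commits.
    changed_files = subprocess.run(
        ["git", "-C", repo_dir, "diff", "--name-only", base_sha, head_sha],
        capture_output=True, text=True, check=True,
    ).stdout.splitlines()
    return path.removeprefix("./") in changed_files

print(file_changed(
    "runtime",                                   # path to a dotnet/runtime checkout (assumption)
    "b463b1630dbf1be5b013208a9fa73e1ecd6c774c",  # previous_commit from the record above
    "be629f49a350d526de2c65981294734cee420b90",  # pr_commit from the record above
    "./src/tests/JIT/HardwareIntrinsics/General/Vector64/Dot.SByte.cs",  # filepath, label -1
))  # expected: False, consistent with label -1
```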
repo_name: dotnet/runtime
pr_number: 66,211
pr_title: [mono] Remove SkipVerification support from the runtime
pr_description: CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
author: akoeplinger
date_created: 2022-03-04T19:47:04Z
date_merged: 2022-03-06T13:44:33Z
previous_commit: b463b1630dbf1be5b013208a9fa73e1ecd6c774c
pr_commit: be629f49a350d526de2c65981294734cee420b90
query: [mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
filepath: ./src/mono/System.Private.CoreLib/src/Mono/RuntimeHandles.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Reflection; using System.Runtime.CompilerServices; namespace Mono { internal unsafe struct RuntimeClassHandle : IEquatable<RuntimeClassHandle> { private readonly RuntimeStructs.MonoClass* value; internal RuntimeClassHandle(RuntimeStructs.MonoClass* value) { this.value = value; } internal RuntimeClassHandle(IntPtr ptr) { this.value = (RuntimeStructs.MonoClass*)ptr; } internal RuntimeStructs.MonoClass* Value => value; public override bool Equals(object? obj) { if (obj == null || GetType() != obj.GetType()) return false; return value == ((RuntimeClassHandle)obj).Value; } public override int GetHashCode() => ((IntPtr)value).GetHashCode(); public bool Equals(RuntimeClassHandle handle) { return value == handle.Value; } public static bool operator ==(RuntimeClassHandle left, object? right) { return right != null && right is RuntimeClassHandle rch && left.Equals(rch); } public static bool operator !=(RuntimeClassHandle left, object? right) { return !(left == right); } public static bool operator ==(object? left, RuntimeClassHandle right) { return left != null && left is RuntimeClassHandle rch && rch.Equals(right); } public static bool operator !=(object? left, RuntimeClassHandle right) { return !(left == right); } [MethodImpl(MethodImplOptions.InternalCall)] internal static extern unsafe IntPtr GetTypeFromClass(RuntimeStructs.MonoClass* klass); internal RuntimeTypeHandle GetTypeHandle() => new RuntimeTypeHandle(GetTypeFromClass(value)); } internal unsafe struct RuntimeRemoteClassHandle { private readonly RuntimeStructs.RemoteClass* value; internal RuntimeRemoteClassHandle(RuntimeStructs.RemoteClass* value) { this.value = value; } internal RuntimeClassHandle ProxyClass { get { return new RuntimeClassHandle(value->proxy_class); } } } internal unsafe struct RuntimeGenericParamInfoHandle { private readonly RuntimeStructs.GenericParamInfo* value; internal RuntimeGenericParamInfoHandle(RuntimeStructs.GenericParamInfo* value) { this.value = value; } internal RuntimeGenericParamInfoHandle(IntPtr ptr) { this.value = (RuntimeStructs.GenericParamInfo*)ptr; } internal Type[] Constraints => GetConstraints(); internal GenericParameterAttributes Attributes => (GenericParameterAttributes)value->flags; private Type[] GetConstraints() { int n = GetConstraintsCount(); var a = new Type[n]; for (int i = 0; i < n; i++) { RuntimeClassHandle c = new RuntimeClassHandle(value->constraints[i]); a[i] = Type.GetTypeFromHandle(c.GetTypeHandle())!; } return a; } private int GetConstraintsCount() { int i = 0; RuntimeStructs.MonoClass** p = value->constraints; while (p != null && *p != null) { p++; i++; } return i; } } internal struct RuntimeEventHandle : IEquatable<RuntimeEventHandle> { private readonly IntPtr value; internal RuntimeEventHandle(IntPtr v) { value = v; } public IntPtr Value => value; public override bool Equals(object? 
obj) { if (obj == null || GetType() != obj.GetType()) return false; return value == ((RuntimeEventHandle)obj).Value; } public bool Equals(RuntimeEventHandle handle) { return value == handle.Value; } public override int GetHashCode() { return value.GetHashCode(); } public static bool operator ==(RuntimeEventHandle left, RuntimeEventHandle right) { return left.Equals(right); } public static bool operator !=(RuntimeEventHandle left, RuntimeEventHandle right) { return !left.Equals(right); } } internal struct RuntimePropertyHandle : IEquatable<RuntimePropertyHandle> { private readonly IntPtr value; internal RuntimePropertyHandle(IntPtr v) { value = v; } public IntPtr Value => value; public override bool Equals(object? obj) { if (obj == null || GetType() != obj.GetType()) return false; return value == ((RuntimePropertyHandle)obj).Value; } public bool Equals(RuntimePropertyHandle handle) { return value == handle.Value; } public override int GetHashCode() { return value.GetHashCode(); } public static bool operator ==(RuntimePropertyHandle left, RuntimePropertyHandle right) { return left.Equals(right); } public static bool operator !=(RuntimePropertyHandle left, RuntimePropertyHandle right) { return !left.Equals(right); } } internal unsafe struct RuntimeGPtrArrayHandle { private RuntimeStructs.GPtrArray* value; internal RuntimeGPtrArrayHandle(RuntimeStructs.GPtrArray* value) { this.value = value; } internal RuntimeGPtrArrayHandle(IntPtr ptr) { this.value = (RuntimeStructs.GPtrArray*)ptr; } internal int Length => value->len; internal IntPtr this[int i] => Lookup(i); internal IntPtr Lookup(int i) { if (i >= 0 && i < Length) { return value->data[i]; } else throw new IndexOutOfRangeException(); } [MethodImpl(MethodImplOptions.InternalCall)] private static extern void GPtrArrayFree(RuntimeStructs.GPtrArray* value); internal static void DestroyAndFree(ref RuntimeGPtrArrayHandle h) { GPtrArrayFree(h.value); h.value = null; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Reflection; using System.Runtime.CompilerServices; namespace Mono { internal unsafe struct RuntimeClassHandle : IEquatable<RuntimeClassHandle> { private readonly RuntimeStructs.MonoClass* value; internal RuntimeClassHandle(RuntimeStructs.MonoClass* value) { this.value = value; } internal RuntimeClassHandle(IntPtr ptr) { this.value = (RuntimeStructs.MonoClass*)ptr; } internal RuntimeStructs.MonoClass* Value => value; public override bool Equals(object? obj) { if (obj == null || GetType() != obj.GetType()) return false; return value == ((RuntimeClassHandle)obj).Value; } public override int GetHashCode() => ((IntPtr)value).GetHashCode(); public bool Equals(RuntimeClassHandle handle) { return value == handle.Value; } public static bool operator ==(RuntimeClassHandle left, object? right) { return right != null && right is RuntimeClassHandle rch && left.Equals(rch); } public static bool operator !=(RuntimeClassHandle left, object? right) { return !(left == right); } public static bool operator ==(object? left, RuntimeClassHandle right) { return left != null && left is RuntimeClassHandle rch && rch.Equals(right); } public static bool operator !=(object? left, RuntimeClassHandle right) { return !(left == right); } [MethodImpl(MethodImplOptions.InternalCall)] internal static extern unsafe IntPtr GetTypeFromClass(RuntimeStructs.MonoClass* klass); internal RuntimeTypeHandle GetTypeHandle() => new RuntimeTypeHandle(GetTypeFromClass(value)); } internal unsafe struct RuntimeRemoteClassHandle { private readonly RuntimeStructs.RemoteClass* value; internal RuntimeRemoteClassHandle(RuntimeStructs.RemoteClass* value) { this.value = value; } internal RuntimeClassHandle ProxyClass { get { return new RuntimeClassHandle(value->proxy_class); } } } internal unsafe struct RuntimeGenericParamInfoHandle { private readonly RuntimeStructs.GenericParamInfo* value; internal RuntimeGenericParamInfoHandle(RuntimeStructs.GenericParamInfo* value) { this.value = value; } internal RuntimeGenericParamInfoHandle(IntPtr ptr) { this.value = (RuntimeStructs.GenericParamInfo*)ptr; } internal Type[] Constraints => GetConstraints(); internal GenericParameterAttributes Attributes => (GenericParameterAttributes)value->flags; private Type[] GetConstraints() { int n = GetConstraintsCount(); var a = new Type[n]; for (int i = 0; i < n; i++) { RuntimeClassHandle c = new RuntimeClassHandle(value->constraints[i]); a[i] = Type.GetTypeFromHandle(c.GetTypeHandle())!; } return a; } private int GetConstraintsCount() { int i = 0; RuntimeStructs.MonoClass** p = value->constraints; while (p != null && *p != null) { p++; i++; } return i; } } internal struct RuntimeEventHandle : IEquatable<RuntimeEventHandle> { private readonly IntPtr value; internal RuntimeEventHandle(IntPtr v) { value = v; } public IntPtr Value => value; public override bool Equals(object? 
obj) { if (obj == null || GetType() != obj.GetType()) return false; return value == ((RuntimeEventHandle)obj).Value; } public bool Equals(RuntimeEventHandle handle) { return value == handle.Value; } public override int GetHashCode() { return value.GetHashCode(); } public static bool operator ==(RuntimeEventHandle left, RuntimeEventHandle right) { return left.Equals(right); } public static bool operator !=(RuntimeEventHandle left, RuntimeEventHandle right) { return !left.Equals(right); } } internal struct RuntimePropertyHandle : IEquatable<RuntimePropertyHandle> { private readonly IntPtr value; internal RuntimePropertyHandle(IntPtr v) { value = v; } public IntPtr Value => value; public override bool Equals(object? obj) { if (obj == null || GetType() != obj.GetType()) return false; return value == ((RuntimePropertyHandle)obj).Value; } public bool Equals(RuntimePropertyHandle handle) { return value == handle.Value; } public override int GetHashCode() { return value.GetHashCode(); } public static bool operator ==(RuntimePropertyHandle left, RuntimePropertyHandle right) { return left.Equals(right); } public static bool operator !=(RuntimePropertyHandle left, RuntimePropertyHandle right) { return !left.Equals(right); } } internal unsafe struct RuntimeGPtrArrayHandle { private RuntimeStructs.GPtrArray* value; internal RuntimeGPtrArrayHandle(RuntimeStructs.GPtrArray* value) { this.value = value; } internal RuntimeGPtrArrayHandle(IntPtr ptr) { this.value = (RuntimeStructs.GPtrArray*)ptr; } internal int Length => value->len; internal IntPtr this[int i] => Lookup(i); internal IntPtr Lookup(int i) { if (i >= 0 && i < Length) { return value->data[i]; } else throw new IndexOutOfRangeException(); } [MethodImpl(MethodImplOptions.InternalCall)] private static extern void GPtrArrayFree(RuntimeStructs.GPtrArray* value); internal static void DestroyAndFree(ref RuntimeGPtrArrayHandle h) { GPtrArrayFree(h.value); h.value = null; } } }
label: -1
repo_name: dotnet/runtime
pr_number: 66,211
pr_title: [mono] Remove SkipVerification support from the runtime
pr_description: CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
author: akoeplinger
date_created: 2022-03-04T19:47:04Z
date_merged: 2022-03-06T13:44:33Z
previous_commit: b463b1630dbf1be5b013208a9fa73e1ecd6c774c
pr_commit: be629f49a350d526de2c65981294734cee420b90
query: [mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
filepath: ./src/libraries/System.Text.Json/src/System/Text/Json/JsonPropertyDictionary.KeyCollection.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; namespace System.Text.Json { internal sealed partial class JsonPropertyDictionary<T> { private KeyCollection? _keyCollection; public ICollection<string> GetKeyCollection() { return _keyCollection ??= new KeyCollection(this); } private sealed class KeyCollection : ICollection<string> { private readonly JsonPropertyDictionary<T> _parent; public KeyCollection(JsonPropertyDictionary<T> jsonObject) { _parent = jsonObject; } public int Count => _parent.Count; public bool IsReadOnly => true; IEnumerator IEnumerable.GetEnumerator() { foreach (KeyValuePair<string, T?> item in _parent) { yield return item.Key; } } public void Add(string propertyName) => ThrowHelper.ThrowNotSupportedException_NodeCollectionIsReadOnly(); public void Clear() => ThrowHelper.ThrowNotSupportedException_NodeCollectionIsReadOnly(); public bool Contains(string propertyName) => _parent.ContainsProperty(propertyName); public void CopyTo(string[] propertyNameArray, int index) { if (index < 0) { ThrowHelper.ThrowArgumentOutOfRangeException_NodeArrayIndexNegative(nameof(index)); } foreach (KeyValuePair<string, T?> item in _parent) { if (index >= propertyNameArray.Length) { ThrowHelper.ThrowArgumentException_NodeArrayTooSmall(nameof(propertyNameArray)); } propertyNameArray[index++] = item.Key; } } public IEnumerator<string> GetEnumerator() { foreach (KeyValuePair<string, T?> item in _parent) { yield return item.Key; } } bool ICollection<string>.Remove(string propertyName) => throw ThrowHelper.GetNotSupportedException_NodeCollectionIsReadOnly(); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Generic; namespace System.Text.Json { internal sealed partial class JsonPropertyDictionary<T> { private KeyCollection? _keyCollection; public ICollection<string> GetKeyCollection() { return _keyCollection ??= new KeyCollection(this); } private sealed class KeyCollection : ICollection<string> { private readonly JsonPropertyDictionary<T> _parent; public KeyCollection(JsonPropertyDictionary<T> jsonObject) { _parent = jsonObject; } public int Count => _parent.Count; public bool IsReadOnly => true; IEnumerator IEnumerable.GetEnumerator() { foreach (KeyValuePair<string, T?> item in _parent) { yield return item.Key; } } public void Add(string propertyName) => ThrowHelper.ThrowNotSupportedException_NodeCollectionIsReadOnly(); public void Clear() => ThrowHelper.ThrowNotSupportedException_NodeCollectionIsReadOnly(); public bool Contains(string propertyName) => _parent.ContainsProperty(propertyName); public void CopyTo(string[] propertyNameArray, int index) { if (index < 0) { ThrowHelper.ThrowArgumentOutOfRangeException_NodeArrayIndexNegative(nameof(index)); } foreach (KeyValuePair<string, T?> item in _parent) { if (index >= propertyNameArray.Length) { ThrowHelper.ThrowArgumentException_NodeArrayTooSmall(nameof(propertyNameArray)); } propertyNameArray[index++] = item.Key; } } public IEnumerator<string> GetEnumerator() { foreach (KeyValuePair<string, T?> item in _parent) { yield return item.Key; } } bool ICollection<string>.Remove(string propertyName) => throw ThrowHelper.GetNotSupportedException_NodeCollectionIsReadOnly(); } } }
label: -1
repo_name: dotnet/runtime
pr_number: 66,211
pr_title: [mono] Remove SkipVerification support from the runtime
pr_description: CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
author: akoeplinger
date_created: 2022-03-04T19:47:04Z
date_merged: 2022-03-06T13:44:33Z
previous_commit: b463b1630dbf1be5b013208a9fa73e1ecd6c774c
pr_commit: be629f49a350d526de2c65981294734cee420b90
query: [mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
filepath: ./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest69/Generated69.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated69.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="Generated69.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\TestFramework\TestFramework.csproj" /> </ItemGroup> </Project>
label: -1
repo_name: dotnet/runtime
pr_number: 66,211
pr_title: [mono] Remove SkipVerification support from the runtime
pr_description: CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
author: akoeplinger
date_created: 2022-03-04T19:47:04Z
date_merged: 2022-03-06T13:44:33Z
previous_commit: b463b1630dbf1be5b013208a9fa73e1ecd6c774c
pr_commit: be629f49a350d526de2c65981294734cee420b90
query: [mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
filepath: ./src/tests/JIT/Generics/Instantiation/Interfaces/class01.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="Class01.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="Class01.cs" /> </ItemGroup> </Project>
label: -1
repo_name: dotnet/runtime
pr_number: 66,211
pr_title: [mono] Remove SkipVerification support from the runtime
pr_description: CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
author: akoeplinger
date_created: 2022-03-04T19:47:04Z
date_merged: 2022-03-06T13:44:33Z
previous_commit: b463b1630dbf1be5b013208a9fa73e1ecd6c774c
pr_commit: be629f49a350d526de2c65981294734cee420b90
query: [mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
filepath: ./src/tests/Loader/classloader/MethodImpl/CovariantReturns/Interfaces/UnitTest.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { } .assembly extern xunit.core {} .assembly extern System.Runtime { } .assembly extern mscorlib { } .assembly UnitTest { } // ======================================================================================== // Types that will be used as return types on the various methods // ======================================================================================== .class interface public auto ansi abstract IUnused1 { } .class interface public auto ansi abstract IUnused2 { } .class interface public auto ansi abstract IA implements IUnused1, IUnused2 { } .class interface public auto ansi abstract IB implements IUnused1, IUnused2, IA { } .class interface public auto ansi abstract IC implements IUnused1, IUnused2, IB { } .class interface public auto ansi abstract IGenRetType<X,Y> { } .class interface public auto ansi abstract IDictionary<KEY,VAL> { } .class public auto ansi abstract CA {} .class public auto ansi abstract CB extends CA {} .class public auto ansi abstract CC extends CB {} .class interface public auto ansi abstract ICovariant<+ T> { } .class interface public auto ansi abstract IContravariant<- T> { } .class interface public auto ansi abstract IGenDerive1<V1,V2> implements IUnused1, IUnused2, class IGenRetType<!V1,!V2> { } .class interface public auto ansi abstract IGenDerive2<U1,U2,U3> implements IUnused1, IUnused2, class IGenDerive1<!U1, class IDictionary<!U3,!U2>> { } .class interface public auto ansi abstract IGenDerive3<T1,T2> implements IUnused1, IUnused2, class IGenDerive2<!T1,!T2,string> { } .class interface public auto ansi abstract INonGenericDerived1<ARG1,ARG2> implements IUnused1, IUnused2, class IGenRetType<!ARG1,!ARG2> { } .class interface public auto ansi abstract INonGenericDerived2<T> implements IUnused1, IUnused2, class INonGenericDerived1<!T,object> { } .class interface public auto ansi abstract INonGenericDerived3 implements IUnused1, IUnused2, class INonGenericDerived2<int32> { } .class interface public auto ansi abstract INonGenericDerived4 implements IUnused1, IUnused2, INonGenericDerived3 { } .class interface public auto ansi abstract IGenToNonGen1<V1,V2> implements IUnused1, IUnused2, IC { } .class interface public auto ansi abstract IGenToNonGen2<U1,U2,U3> implements IUnused1, IUnused2, class IGenToNonGen1<!U1, class IDictionary<!U3,!U2>> { } .class interface public auto ansi abstract IGenToNonGen3<T1,T2> implements IUnused1, IUnused2, class IGenToNonGen2<!T1,!T2,string> { } .class interface public auto ansi abstract INonGenThroughGen1<V1,V2> implements IUnused1, IUnused2, IC { } .class interface public auto ansi abstract INonGenThroughGen2<U1,U2,U3> implements IUnused1, IUnused2, class INonGenThroughGen1<!U1, class IDictionary<!U3,!U2>> { } .class interface public auto ansi abstract INonGenThroughGen3 implements IUnused1, IUnused2, class INonGenThroughGen2<object,int32,string> { } .class interface public auto ansi abstract INonGenThroughGen4 implements IUnused1, IUnused2, INonGenThroughGen3 { } // class implementing the interfaces .class public auto ansi beforefieldinit NonGenThroughGen4 implements IUnused1, IUnused2, INonGenThroughGen4 { } .class public auto ansi beforefieldinit GenToNonGen3<T1,T2> implements IUnused1, IUnused2, class IGenToNonGen3<!T1,!T2> { } .class public auto ansi beforefieldinit NonGenericDerived4 implements IUnused1, IUnused2, INonGenericDerived4 { } .class 
public auto ansi beforefieldinit GenDerive3<T1,T2> implements IUnused1, IUnused2, class IGenDerive3<!T1,!T2> { } .class public auto ansi beforefieldinit C implements IUnused1, IUnused2, IC { } .class public auto ansi beforefieldinit GenRetType<X,Y> implements IUnused1, IUnused2, class IGenRetType<!X,!Y> { } .class public auto ansi beforefieldinit Base<T> {} .class public auto ansi beforefieldinit Derived extends class Base<class Derived> {} .class public auto ansi beforefieldinit Derived2 extends class Base<class Derived2> {} .class interface public auto ansi abstract IVariant<+ V> { .method public hidebysig newslot virtual instance void Test() { ret } } // ======================================================================================== // Main base type with various virtual methods that will be overriden later // ======================================================================================== .class public auto ansi beforefieldinit GenBaseType<A,B> { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ret } .method public hidebysig newslot virtual instance object MyFunc(string& res) { ldarg.1 ldstr "object GenBaseType.MyFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IB MyFunc(string& res) { ldarg.1 ldstr "IB GenBaseType.MyFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class ICovariant<class CB> MyFuncCovariant(string& res) { ldarg.1 ldstr "ICovariant<CB> GenBaseType.MyFuncCovariant()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IContravariant<class CB> MyFuncContravariant(string& res) { ldarg.1 ldstr "IContravariant<CB> GenBaseType.MyFuncContravariant()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IB GenToNonGen(string& res) { ldarg.1 ldstr "IB GenBaseType.GenToNonGen()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IB NonGenThroughGenFunc(string& res) { ldarg.1 ldstr "IB GenBaseType.NonGenThroughGenFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IGenRetType<int32,object> MyGenFunc(string& res) { ldarg.1 ldstr "IGenRetType<int32,object> GenBaseType.MyGenFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IGenRetType<!A, class IDictionary<string,!B>> MyGenFunc(string& res) { ldarg.1 ldstr "IGenRetType<!A, class IDictionary<string,!B>> GenBaseType.MyGenFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IVariant<class Base<class Derived>> MultiLevelGenericVariantFunc(string&) { ldarg.1 ldstr "IVariant<class Base<class Derived>> GenBaseType.MultiLevelGenericVariantFunc()" stind.ref ldnull ret } } // ======================================================================================== // SECOND LAYER type: overrides *SOME* virtuals on GenBaseType using MethodImpls with // covariant return types (more derived return types) // ======================================================================================== .class public auto ansi beforefieldinit GenMiddleType<U,V> extends class GenBaseType<!V,!U> { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ret } .method public hidebysig newslot virtual instance class INonGenThroughGen2<object,int32,string> NonGenThroughGenFunc(string& res) { .custom instance void [System.Runtime]System.Runtime.CompilerServices.PreserveBaseOverridesAttribute::.ctor() = (01 00 00 00) .override 
method instance class IB class GenBaseType<!V,!U>::NonGenThroughGenFunc(string& res) ldarg.1 ldstr "INonGenThroughGen2<object,int32,string> GenMiddleType.NonGenThroughGenFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IGenToNonGen1<!V, class IDictionary<string,object>> GenToNonGen(string& res) { .custom instance void [System.Runtime]System.Runtime.CompilerServices.PreserveBaseOverridesAttribute::.ctor() = (01 00 00 00) .override method instance class IB class GenBaseType<!V,!U>::GenToNonGen(string& res) ldarg.1 ldstr "IGenToNonGen1<!V, class IDictionary<string,object>> GenMiddleType.GenToNonGen()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class INonGenericDerived1<int32,object> MyGenFunc(string& res) { .custom instance void [System.Runtime]System.Runtime.CompilerServices.PreserveBaseOverridesAttribute::.ctor() = (01 00 00 00) .override method instance class IGenRetType<int32,object> class GenBaseType<!V,!U>::MyGenFunc(string& res) ldarg.1 ldstr "INonGenericDerived1<int32,object> GenMiddleType.MyGenFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IGenDerive1<!V, class IDictionary<string,!U>> MyGenFunc(string& res) { .custom instance void [System.Runtime]System.Runtime.CompilerServices.PreserveBaseOverridesAttribute::.ctor() = (01 00 00 00) .override method instance class IGenRetType<!0, class IDictionary<string,!1>> class GenBaseType<!V,!U>::MyGenFunc(string& res) ldarg.1 ldstr "IGenDerive1<!U, class IDictionary<string,!V>> GenMiddleType.MyGenFunc()" stind.ref ldnull ret } } // ======================================================================================== // THIRD LAYER type: overrides all virtuals from GenBaseType using MethodImpls with // covariant return types (more derived return types than the ones used in GenMiddleType) // ======================================================================================== .class public auto ansi beforefieldinit GenTestType<UNUSED1,UNUSED2,U,V> extends class GenMiddleType<!V,!U> { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ret } .method public hidebysig newslot virtual instance class INonGenThroughGen4 NonGenThroughGenFunc(string& res) { .override method instance class IB class GenBaseType<!U,!V>::NonGenThroughGenFunc(string& res) ldarg.1 ldstr "INonGenThroughGen4 TestType.NonGenThroughGenFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IGenToNonGen3<!U,object> GenToNonGen(string& res) { .override method instance class IB class GenBaseType<!U,!V>::GenToNonGen(string& res) ldarg.1 ldstr "IGenToNonGen3<!U,object> TestType.GenToNonGen()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class INonGenericDerived4 MyGenFunc(string& res) { .override method instance class IGenRetType<int32,object> class GenBaseType<!U,!V>::MyGenFunc(string& res) ldarg.1 ldstr "INonGenericDerived4 TestType.MyGenFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IGenDerive3<!U,!V> MyGenFunc(string& res) { .override method instance class IGenRetType<!0, class IDictionary<string,!1>> class GenBaseType<!U,!V>::MyGenFunc(string& res) ldarg.1 ldstr "IGenDerive3<!U,!V> TestType.MyGenFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IGenRetType<!U,!V> MyFunc(string& res) { .custom instance void [System.Runtime]System.Runtime.CompilerServices.PreserveBaseOverridesAttribute::.ctor() = (01 00 
00 00) .override method instance object class GenBaseType<!U,!V>::MyFunc(string& res) ldarg.1 ldstr "IGenRetType<!U,!V> TestType.MyFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IC MyFunc(string& res) { .custom instance void [System.Runtime]System.Runtime.CompilerServices.PreserveBaseOverridesAttribute::.ctor() = (01 00 00 00) .override method instance class IB class GenBaseType<!U,!V>::MyFunc(string& res) ldarg.1 ldstr "IC TestType.MyFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class ICovariant<class CC> MyFuncCovariant(string& res) { .custom instance void [System.Runtime]System.Runtime.CompilerServices.PreserveBaseOverridesAttribute::.ctor() = (01 00 00 00) .override method instance class ICovariant<class CB> class GenBaseType<!U,!V>::MyFuncCovariant(string& res) ldarg.1 ldstr "ICovariant<CC> TestType.MyFuncCovariant()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IContravariant<class CA> MyFuncContravariant(string& res) { .custom instance void [System.Runtime]System.Runtime.CompilerServices.PreserveBaseOverridesAttribute::.ctor() = (01 00 00 00) .override method instance class IContravariant<class CB> class GenBaseType<!U,!V>::MyFuncContravariant(string& res) ldarg.1 ldstr "IContravariant<CA> TestType.MyFuncContravariant()" stind.ref ldnull ret } // ======================================================================================== // Set of implicit overrides that should be ignored given there are explicit overrides from the MethodImpls // ======================================================================================== .method public hidebysig virtual instance class IB NonGenThroughGenFunc(string& res) { ldstr "Should never execute this method" newobj instance void [System.Runtime]System.Exception::.ctor(string) throw } .method public hidebysig virtual instance class IB GenToNonGen(string& res) { ldstr "Should never execute this method" newobj instance void [System.Runtime]System.Exception::.ctor(string) throw } .method public hidebysig virtual instance class IGenRetType<int32,object> MyGenFunc(string& res) { ldstr "Should never execute this method" newobj instance void [System.Runtime]System.Exception::.ctor(string) throw } .method public hidebysig virtual instance class IGenRetType<!0, class IDictionary<string,!1>> MyGenFunc(string& res) { ldstr "Should never execute this method" newobj instance void [System.Runtime]System.Exception::.ctor(string) throw } .method public hidebysig virtual instance object MyFunc(string& res) { ldstr "Should never execute this method" newobj instance void [System.Runtime]System.Exception::.ctor(string) throw } .method public hidebysig virtual instance class IB MyFunc(string& res) { ldstr "Should never execute this method" newobj instance void [System.Runtime]System.Exception::.ctor(string) throw } .method public hidebysig virtual instance class ICovariant<class CB> MyFuncCovariant(string& res) { ldstr "Should never execute this method" newobj instance void [System.Runtime]System.Exception::.ctor(string) throw } .method public hidebysig virtual instance class IContravariant<class CB> MyFuncContravariant(string& res) { ldstr "Should never execute this method" newobj instance void [System.Runtime]System.Exception::.ctor(string) throw } } // ======================================================================================== // FOURTH LAYER type: overrides all virtuals from GenBaseType using MethodImpls with // covariant return 
types (classes that implement the interfaces used as return types) // ======================================================================================== .class public auto ansi beforefieldinit GenMoreDerived<UNUSED1,UNUSED2,U,V> extends class GenTestType<!UNUSED2,!UNUSED1,!U,!V> { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ret } .method public hidebysig newslot virtual instance class NonGenThroughGen4 NonGenThroughGenFunc(string& res) { .override method instance class IB class GenBaseType<!U,!V>::NonGenThroughGenFunc(string& res) ldarg.1 ldstr "class NonGenThroughGen4 GenMoreDerived.NonGenThroughGenFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class GenToNonGen3<!U,object> GenToNonGen(string& res) { .override method instance class IB class GenBaseType<!U,!V>::GenToNonGen(string& res) ldarg.1 ldstr "class GenToNonGen3<!U,object> GenMoreDerived.GenToNonGen()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class NonGenericDerived4 MyGenFunc(string& res) { .override method instance class IGenRetType<int32,object> class GenBaseType<!U,!V>::MyGenFunc(string& res) ldarg.1 ldstr "class NonGenericDerived4 GenMoreDerived.MyGenFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class GenDerive3<!U,!V> MyGenFunc(string& res) { .override method instance class IGenRetType<!0, class IDictionary<string,!1>> class GenBaseType<!U,!V>::MyGenFunc(string& res) ldarg.1 ldstr "class GenDerive3<!U,!V> GenMoreDerived.MyGenFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class GenRetType<!U,!V> MyFunc(string& res) { .override method instance object class GenBaseType<!U,!V>::MyFunc(string& res) ldarg.1 ldstr "class GenRetType<!U,!V> GenMoreDerived.MyFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class C MyFunc(string& res) { .override method instance class IB class GenBaseType<!U,!V>::MyFunc(string& res) ldarg.1 ldstr "class C GenMoreDerived.MyFunc()" stind.ref ldnull ret } .method public hidebysig newslot virtual instance class IVariant<class Derived> MultiLevelGenericVariantFunc(string&) { .custom instance void [System.Runtime]System.Runtime.CompilerServices.PreserveBaseOverridesAttribute::.ctor() = (01 00 00 00) .override method instance class IVariant<class Base<class Derived>> class GenBaseType<!U,!V>::MultiLevelGenericVariantFunc(string&) ldarg.1 ldstr "class IVariant<class Derived> GenMoreDerived.MultiLevelGenericVariantFunc()" stind.ref ldnull ret } } // ======================================================================================== // FIFTH LAYER INVALID type: Used to verify we can't override the method using a compatible interface // if it has been already overridden using a class that implements the interface (i.e. 
the new // interface return type won't be compatible with the class return type on the previous override // ======================================================================================== .class public auto ansi beforefieldinit Invalid1<Q,W> extends class GenMoreDerived<!Q,!W,!Q,!W> { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ret } .method public hidebysig newslot virtual instance class INonGenThroughGen4 NonGenThroughGenFunc(string& res) { .override method instance class IB class GenBaseType<!Q,!W>::NonGenThroughGenFunc(string& res) ldnull ret } } // ======================================================================================== // FIFTH LAYER INVALID type: Used to verify we can't override the method using a less derived interface // than one that has already been used in a previous override // ======================================================================================== .class public auto ansi beforefieldinit Invalid2<Q,W> extends class GenTestType<!Q,!W,!Q,!W> { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ret } .method public hidebysig newslot virtual instance class INonGenThroughGen2<object,int32,string> NonGenThroughGenFunc(string& res) { .override method instance class IB class GenBaseType<!Q,!W>::NonGenThroughGenFunc(string& res) ldnull ret } } .class public auto ansi beforefieldinit Invalid3<Q,W> extends class GenTestType<!Q,!W,!Q,!W> { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ret } .method public hidebysig newslot virtual instance class IVariant<class Derived2> MultiLevelGenericVariantFunc(string&) { .custom instance void [System.Runtime]System.Runtime.CompilerServices.PreserveBaseOverridesAttribute::.ctor() = (01 00 00 00) .override method instance class IVariant<class Base<class Derived>> class GenBaseType<!Q,!W>::MultiLevelGenericVariantFunc(string&) ldnull ret } } // ======================================================================================== .class public auto ansi beforefieldinit CMain extends [mscorlib]System.Object { .method private hidebysig static bool CheckResults ( string expected, string a, [opt] string b, [opt] string c, [opt] string d) cil managed { .param [3] = nullref .param [4] = nullref .param [5] = nullref // Method begins at RVA 0x20a0 // Code size 164 (0xa4) .maxstack 2 .locals /* 11000002 */ init ( [0] bool ) IL_0000: ldarg.1 IL_0001: ldarg.0 IL_0002: call bool [System.Runtime]System.String::op_Equality(string, string) /* 0A000012 */ IL_0007: stloc.0 IL_0008: ldstr "EXPECTED: " /* 70000001 */ IL_000d: ldarg.0 IL_000e: call string [System.Runtime]System.String::Concat(string, string) /* 0A000013 */ IL_0013: call void [System.Console]System.Console::WriteLine(string) /* 0A000014 */ IL_0018: ldstr "ACTUAL1 : " /* 70000017 */ IL_001d: ldarg.1 IL_001e: call string [System.Runtime]System.String::Concat(string, string) /* 0A000013 */ IL_0023: call void [System.Console]System.Console::WriteLine(string) /* 0A000014 */ IL_0028: ldarg.2 IL_0029: call bool [System.Runtime]System.String::IsNullOrEmpty(string) /* 0A000015 */ IL_002e: brtrue.s IL_004e IL_0030: ldstr "ACTUAL2 : " /* 7000002D */ IL_0035: ldarg.2 IL_0036: call string [System.Runtime]System.String::Concat(string, string) /* 0A000013 */ IL_003b: call void [System.Console]System.Console::WriteLine(string) /* 0A000014 */ IL_0040: ldloc.0 IL_0041: brfalse.s IL_004c IL_0043: ldarg.2 IL_0044: ldarg.0 IL_0045: call bool 
[System.Runtime]System.String::op_Equality(string, string) /* 0A000012 */ IL_004a: br.s IL_004d IL_004c: ldc.i4.0 IL_004d: stloc.0 IL_004e: ldarg.3 IL_004f: call bool [System.Runtime]System.String::IsNullOrEmpty(string) /* 0A000015 */ IL_0054: brtrue.s IL_0074 IL_0056: ldstr "ACTUAL3 : " /* 70000043 */ IL_005b: ldarg.3 IL_005c: call string [System.Runtime]System.String::Concat(string, string) /* 0A000013 */ IL_0061: call void [System.Console]System.Console::WriteLine(string) /* 0A000014 */ IL_0066: ldloc.0 IL_0067: brfalse.s IL_0072 IL_0069: ldarg.3 IL_006a: ldarg.0 IL_006b: call bool [System.Runtime]System.String::op_Equality(string, string) /* 0A000012 */ IL_0070: br.s IL_0073 IL_0072: ldc.i4.0 IL_0073: stloc.0 IL_0074: ldarg.s d IL_0076: call bool [System.Runtime]System.String::IsNullOrEmpty(string) /* 0A000015 */ IL_007b: brtrue.s IL_009d IL_007d: ldstr "ACTUAL4 : " /* 70000059 */ IL_0082: ldarg.s d IL_0084: call string [System.Runtime]System.String::Concat(string, string) /* 0A000013 */ IL_0089: call void [System.Console]System.Console::WriteLine(string) /* 0A000014 */ IL_008e: ldloc.0 IL_008f: brfalse.s IL_009b IL_0091: ldarg.s d IL_0093: ldarg.0 IL_0094: call bool [System.Runtime]System.String::op_Equality(string, string) /* 0A000012 */ IL_0099: br.s IL_009c IL_009b: ldc.i4.0 IL_009c: stloc.0 IL_009d: call void [System.Console]System.Console::WriteLine() /* 0A000016 */ IL_00a2: ldloc.0 IL_00a3: ret } // end of method Program::CheckResults // ============== Test using GenTestType ============== // // These test methods will callvirt each method using: // 1) The signature from GenBaseType // 2) The signature from GenMiddleType with covariant returns (when applicable) // 3) The signature from GenTestType with covariant returns // And verify that the override on GenTestType is the one that executes .method public static bool RunTest1() noinlining { .locals init (string res1, string res2, string res3) newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 0 callvirt instance object class GenBaseType<int32,object>::MyFunc(string&) pop newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 1 callvirt instance class IGenRetType<!2,!3> class GenTestType<int32,object,int32,object>::MyFunc(string&) pop ldstr "IGenRetType<!U,!V> TestType.MyFunc()" ldloc.0 ldloc.1 ldloc.2 ldnull call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest2() noinlining { .locals init (string res1, string res2, string res3) newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 0 callvirt instance class IB class GenBaseType<int32,object>::MyFunc(string&) pop newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 1 callvirt instance class IC class GenTestType<int32,object,int32,object>::MyFunc(string&) pop ldstr "IC TestType.MyFunc()" ldloc.0 ldloc.1 ldloc.2 ldnull call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest3() noinlining { .locals init (string res1, string res2, string res3) newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 0 callvirt instance class IGenRetType<!0, class IDictionary<string,!1>> class GenBaseType<int32,object>::MyGenFunc(string&) pop newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 1 callvirt instance class IGenDerive1<!1, class IDictionary<string,!0>> class GenMiddleType<object,int32>::MyGenFunc(string&) pop 
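// Note (added): the two calls above go through the GenBaseType and GenMiddleType signatures; the call below uses GenTestType's own covariant signature. CheckResults then verifies that all three dispatched to GenTestType's MyGenFunc override.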
newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 2 callvirt instance class IGenDerive3<!2,!3> class GenTestType<int32,object,int32,object>::MyGenFunc(string&) pop ldstr "IGenDerive3<!U,!V> TestType.MyGenFunc()" ldloc.0 ldloc.1 ldloc.2 ldnull call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest4() noinlining { .locals init (string res1, string res2, string res3) newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 0 callvirt instance class IGenRetType<int32,object> class GenBaseType<int32,object>::MyGenFunc(string&) pop newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 1 callvirt instance class INonGenericDerived1<int32,object> class GenMiddleType<object,int32>::MyGenFunc(string&) pop newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 2 callvirt instance class INonGenericDerived4 class GenTestType<int32,object,int32,object>::MyGenFunc(string&) pop ldstr "INonGenericDerived4 TestType.MyGenFunc()" ldloc.0 ldloc.1 ldloc.2 ldnull call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest5() noinlining { .locals init (string res1, string res2, string res3) newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 0 callvirt instance class IB class GenBaseType<int32,object>::GenToNonGen(string&) pop newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 1 callvirt instance class IGenToNonGen1<!1, class IDictionary<string,object>> class GenMiddleType<object,int32>::GenToNonGen(string&) pop newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 2 callvirt instance class IGenToNonGen3<!2,object> class GenTestType<int32,object,int32,object>::GenToNonGen(string&) pop ldstr "IGenToNonGen3<!U,object> TestType.GenToNonGen()" ldloc.0 ldloc.1 ldloc.2 ldnull call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest6() noinlining { .locals init (string res1, string res2, string res3) newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 0 callvirt instance class IB class GenBaseType<int32,object>::NonGenThroughGenFunc(string&) pop newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 1 callvirt instance class INonGenThroughGen2<object,int32,string> class GenMiddleType<object,int32>::NonGenThroughGenFunc(string&) pop newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 2 callvirt instance class INonGenThroughGen4 class GenTestType<int32,object,int32,object>::NonGenThroughGenFunc(string&) pop ldstr "INonGenThroughGen4 TestType.NonGenThroughGenFunc()" ldloc.0 ldloc.1 ldloc.2 ldnull call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest7() noinlining { .locals init (string res1, string res2, string res3) newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 0 callvirt instance class ICovariant<class CB> class GenBaseType<int32,object>::MyFuncCovariant(string&) pop newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 1 callvirt instance class ICovariant<class CC> class GenTestType<int32,object,int32,object>::MyFuncCovariant(string&) pop ldstr "ICovariant<CC> TestType.MyFuncCovariant()" ldloc.0 ldloc.1 ldloc.2 ldnull call bool 
CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest8() noinlining { .locals init (string res1, string res2, string res3) newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 0 callvirt instance class IContravariant<class CB> class GenBaseType<int32,object>::MyFuncContravariant(string&) pop newobj instance void class GenTestType<int32,object,int32,object>::.ctor() ldloca.s 1 callvirt instance class IContravariant<class CA> class GenTestType<int32,object,int32,object>::MyFuncContravariant(string&) pop ldstr "IContravariant<CA> TestType.MyFuncContravariant()" ldloc.0 ldloc.1 ldloc.2 ldnull call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest9() noinlining { .locals init (string res1) newobj instance void class GenMoreDerived<int32,object,int32,object>::.ctor() ldloca.s 0 callvirt instance class IVariant<class Derived> class GenMoreDerived<int32,object,int32,object>::MultiLevelGenericVariantFunc(string&) pop ldstr "class IVariant<class Derived> GenMoreDerived.MultiLevelGenericVariantFunc()" ldloc.0 ldnull ldnull ldnull call bool CMain::CheckResults(string,string,string,string,string) ret } // ============== Test using GenMiddleType ============== // // These test methods will callvirt each method using: // 1) The signature from GenBaseType // 2) The signature from GenMiddleType with covariant returns // And verify that the override on GenMiddleType is the one that executes .method public static bool RunTest_Middle1() noinlining { .locals init (string res1, string res2, string res3) newobj instance void class GenMiddleType<object,int32>::.ctor() ldloca.s 0 callvirt instance class IGenRetType<!0, class IDictionary<string,!1>> class GenBaseType<int32,object>::MyGenFunc(string&) pop newobj instance void class GenMiddleType<object,int32>::.ctor() ldloca.s 1 callvirt instance class IGenDerive1<!1, class IDictionary<string,!0>> class GenMiddleType<object,int32>::MyGenFunc(string&) pop ldstr "IGenDerive1<!U, class IDictionary<string,!V>> GenMiddleType.MyGenFunc()" ldloc.0 ldloc.1 ldloc.2 ldnull call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest_Middle2() noinlining { .locals init (string res1, string res2, string res3) newobj instance void class GenMiddleType<object,int32>::.ctor() ldloca.s 0 callvirt instance class IGenRetType<int32,object> class GenBaseType<int32,object>::MyGenFunc(string&) pop newobj instance void class GenMiddleType<object,int32>::.ctor() ldloca.s 1 callvirt instance class INonGenericDerived1<int32,object> class GenMiddleType<object,int32>::MyGenFunc(string&) pop ldstr "INonGenericDerived1<int32,object> GenMiddleType.MyGenFunc()" ldloc.0 ldloc.1 ldloc.2 ldnull call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest_Middle3() noinlining { .locals init (string res1, string res2, string res3) newobj instance void class GenMiddleType<object,int32>::.ctor() ldloca.s 0 callvirt instance class IB class GenBaseType<int32,object>::GenToNonGen(string&) pop newobj instance void class GenMiddleType<object,int32>::.ctor() ldloca.s 1 callvirt instance class IGenToNonGen1<!1, class IDictionary<string,object>> class GenMiddleType<object,int32>::GenToNonGen(string&) pop ldstr "IGenToNonGen1<!V, class IDictionary<string,object>> GenMiddleType.GenToNonGen()" ldloc.0 ldloc.1 ldloc.2 ldnull call bool CMain::CheckResults(string,string,string,string,string) ret } .method 
public static bool RunTest_Middle4() noinlining { .locals init (string res1, string res2, string res3) newobj instance void class GenMiddleType<object,int32>::.ctor() ldloca.s 0 callvirt instance class IB class GenBaseType<int32,object>::NonGenThroughGenFunc(string&) pop newobj instance void class GenMiddleType<object,int32>::.ctor() ldloca.s 1 callvirt instance class INonGenThroughGen2<object,int32,string> class GenMiddleType<object,int32>::NonGenThroughGenFunc(string&) pop ldstr "INonGenThroughGen2<object,int32,string> GenMiddleType.NonGenThroughGenFunc()" ldloc.0 ldloc.1 ldloc.2 ldnull call bool CMain::CheckResults(string,string,string,string,string) ret } // ============== Test using GenMoreDerived ============== // // These test methods will callvirt each method using: // 1) The signature from GenBaseType // 2) The signature from GenMiddleType with covariant returns (when applicable) // 3) The signature from GenTestType with covariant returns // 4) The signature from GenMoreDerived with covariant returns // And verify that the override on GenMoreDerived is the one that executes .method public static bool RunTest_MoreDerived1() noinlining { .locals init (string res1, string res2, string res3, string res4) newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 0 callvirt instance object class GenBaseType<int32,object>::MyFunc(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 1 callvirt instance class IGenRetType<!2,!3> class GenTestType<int32,object,int32,object>::MyFunc(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 2 callvirt instance class GenRetType<!2,!3> class GenMoreDerived<object,int32,int32,object>::MyFunc(string&) pop ldstr "class GenRetType<!U,!V> GenMoreDerived.MyFunc()" ldloc.0 ldloc.1 ldloc.2 ldloc.3 call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest_MoreDerived2() noinlining { .locals init (string res1, string res2, string res3, string res4) newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 0 callvirt instance class IB class GenBaseType<int32,object>::MyFunc(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 1 callvirt instance class IC class GenTestType<int32,object,int32,object>::MyFunc(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 2 callvirt instance class C class GenMoreDerived<object,int32,int32,object>::MyFunc(string&) pop ldstr "class C GenMoreDerived.MyFunc()" ldloc.0 ldloc.1 ldloc.2 ldloc.3 call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest_MoreDerived3() noinlining { .locals init (string res1, string res2, string res3, string res4) newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 0 callvirt instance class IGenRetType<!0, class IDictionary<string,!1>> class GenBaseType<int32,object>::MyGenFunc(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 1 callvirt instance class IGenDerive1<!1, class IDictionary<string,!0>> class GenMiddleType<object,int32>::MyGenFunc(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 2 callvirt instance class IGenDerive3<!2,!3> class GenTestType<int32,object,int32,object>::MyGenFunc(string&) pop newobj instance void class 
GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 3 callvirt instance class GenDerive3<!2,!3> class GenMoreDerived<object,int32,int32,object>::MyGenFunc(string&) pop ldstr "class GenDerive3<!U,!V> GenMoreDerived.MyGenFunc()" ldloc.0 ldloc.1 ldloc.2 ldloc.3 call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest_MoreDerived4() noinlining { .locals init (string res1, string res2, string res3, string res4) newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 0 callvirt instance class IGenRetType<int32,object> class GenBaseType<int32,object>::MyGenFunc(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 1 callvirt instance class INonGenericDerived1<int32,object> class GenMiddleType<object,int32>::MyGenFunc(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 2 callvirt instance class INonGenericDerived4 class GenTestType<int32,object,int32,object>::MyGenFunc(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 3 callvirt instance class NonGenericDerived4 class GenMoreDerived<object,int32,int32,object>::MyGenFunc(string&) pop ldstr "class NonGenericDerived4 GenMoreDerived.MyGenFunc()" ldloc.0 ldloc.1 ldloc.2 ldloc.3 call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest_MoreDerived5() noinlining { .locals init (string res1, string res2, string res3, string res4) newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 0 callvirt instance class IB class GenBaseType<int32,object>::GenToNonGen(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 1 callvirt instance class IGenToNonGen1<!1, class IDictionary<string,object>> class GenMiddleType<object,int32>::GenToNonGen(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 2 callvirt instance class IGenToNonGen3<!2,object> class GenTestType<int32,object,int32,object>::GenToNonGen(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 3 callvirt instance class GenToNonGen3<!2,object> class GenMoreDerived<object,int32,int32,object>::GenToNonGen(string&) pop ldstr "class GenToNonGen3<!U,object> GenMoreDerived.GenToNonGen()" ldloc.0 ldloc.1 ldloc.2 ldloc.3 call bool CMain::CheckResults(string,string,string,string,string) ret } .method public static bool RunTest_MoreDerived6() noinlining { .locals init (string res1, string res2, string res3, string res4) newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 0 callvirt instance class IB class GenBaseType<int32,object>::NonGenThroughGenFunc(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 1 callvirt instance class INonGenThroughGen2<object,int32,string> class GenMiddleType<object,int32>::NonGenThroughGenFunc(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 2 callvirt instance class INonGenThroughGen4 class GenTestType<int32,object,int32,object>::NonGenThroughGenFunc(string&) pop newobj instance void class GenMoreDerived<object,int32,int32,object>::.ctor() ldloca.s 3 callvirt instance class NonGenThroughGen4 class GenMoreDerived<object,int32,int32,object>::NonGenThroughGenFunc(string&) pop ldstr "class NonGenThroughGen4 
GenMoreDerived.NonGenThroughGenFunc()" ldloc.0 ldloc.1 ldloc.2 ldloc.3 call bool CMain::CheckResults(string,string,string,string,string) ret } // ===================================================================================== // .method public static void RunTest_Invalid1() noinlining { newobj instance void class Invalid1<int32,object>::.ctor() call void [System.Console]System.Console::WriteLine(object) ret } .method public static void RunTest_Invalid2() noinlining { newobj instance void class Invalid2<int32,object>::.ctor() call void [System.Console]System.Console::WriteLine(object) ret } .method public static void RunTest_Invalid3() noinlining { newobj instance void class Invalid3<int32,object>::.ctor() callvirt instance class IVariant<class Derived2> class Invalid3<int32,object>::MultiLevelGenericVariantFunc() pop ret } // ===================================================================================== // .method public hidebysig static int32 Main( string[] args) cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 2 .locals init ( bool result ) ldc.i4.1 stloc.0 T1: call bool CMain::RunTest1() brtrue.s T2 ldc.i4.0 stloc.0 T2: call bool CMain::RunTest2() brtrue.s T3 ldc.i4.0 stloc.0 T3: call bool CMain::RunTest3() brtrue.s T4 ldc.i4.0 stloc.0 T4: call bool CMain::RunTest4() brtrue.s T5 ldc.i4.0 stloc.0 T5: call bool CMain::RunTest5() brtrue.s T6 ldc.i4.0 stloc.0 T6: call bool CMain::RunTest6() brtrue.s T7 ldc.i4.0 stloc.0 T7: call bool CMain::RunTest7() brtrue.s T8 ldc.i4.0 stloc.0 T8: call bool CMain::RunTest8() brtrue.s T9 ldc.i4.0 stloc.0 T9: call bool CMain::RunTest9() brtrue.s M1 ldc.i4.0 stloc.0 // ===================================================================================== // M1: call bool CMain::RunTest_Middle1() brtrue.s M2 ldc.i4.0 stloc.0 M2: call bool CMain::RunTest_Middle2() brtrue.s M3 ldc.i4.0 stloc.0 M3: call bool CMain::RunTest_Middle3() brtrue.s M4 ldc.i4.0 stloc.0 M4: call bool CMain::RunTest_Middle4() brtrue.s MOREDERIVED1 ldc.i4.0 stloc.0 // ===================================================================================== // MOREDERIVED1: call bool CMain::RunTest_MoreDerived1() brtrue.s MOREDERIVED2 ldc.i4.0 stloc.0 MOREDERIVED2: call bool CMain::RunTest_MoreDerived2() brtrue.s MOREDERIVED3 ldc.i4.0 stloc.0 MOREDERIVED3: call bool CMain::RunTest_MoreDerived3() brtrue.s MOREDERIVED4 ldc.i4.0 stloc.0 MOREDERIVED4: call bool CMain::RunTest_MoreDerived4() brtrue.s MOREDERIVED5 ldc.i4.0 stloc.0 MOREDERIVED5: call bool CMain::RunTest_MoreDerived5() brtrue.s MOREDERIVED6 ldc.i4.0 stloc.0 MOREDERIVED6: call bool CMain::RunTest_MoreDerived6() brtrue.s INVALID1 ldc.i4.0 stloc.0 // ===================================================================================== // INVALID1: .try { call void CMain::RunTest_Invalid1() ldc.i4.0 stloc.0 ldstr "FAIL: did not catch expected TypeLoadException when loading Invalid1." call void [System.Console]System.Console::WriteLine(string) leave.s INVALID2 } catch [mscorlib]System.TypeLoadException { ldstr "Caught expected TypeLoadException:" call void [System.Console]System.Console::WriteLine(string) call void [System.Console]System.Console::WriteLine(object) leave.s INVALID2 } INVALID2: .try { call void CMain::RunTest_Invalid2() ldc.i4.0 stloc.0 ldstr "FAIL: did not catch expected TypeLoadException when loading Invalid2." 
call void [System.Console]System.Console::WriteLine(string) leave.s INVALID3 } catch [mscorlib]System.TypeLoadException { ldstr "Caught expected TypeLoadException:" call void [System.Console]System.Console::WriteLine(string) call void [System.Console]System.Console::WriteLine(object) leave.s INVALID3 } INVALID3: .try { call void CMain::RunTest_Invalid3() ldc.i4.0 stloc.0 ldstr "FAIL: did not catch expected TypeLoadException when loading Invalid3." call void [System.Console]System.Console::WriteLine(string) leave.s DONE } catch [mscorlib]System.TypeLoadException { ldstr "Caught expected TypeLoadException:" call void [System.Console]System.Console::WriteLine(string) call void [System.Console]System.Console::WriteLine(object) leave.s DONE } // ===================================================================================== // DONE: ldloc.0 brtrue.s PASS ldstr "Test FAILED" call void [System.Console]System.Console::WriteLine(string) ldc.i4.s 101 ret PASS: ldstr "Test PASSED" call void [System.Console]System.Console::WriteLine(string) ldc.i4.s 100 ret ldc.i4.s 100 ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { .maxstack 8 ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } }
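// ----------------------------------------------------------------------------------------
// Editor's note: the C# sketch below is NOT part of the test. It is a minimal, hypothetical
// illustration (invented names: Animal, Cat, Shelter, PetShelter, Demo) of the covariant
// return behavior that the IL above exercises directly via .override MethodImpls and
// PreserveBaseOverridesAttribute. It assumes C# 9 / .NET 5+ covariant returns.
//
//     using System;
//
//     public class Animal { }
//     public class Cat : Animal { }
//
//     public class Shelter
//     {
//         // Base virtual method returns the less derived type.
//         public virtual Animal Adopt() => new Animal();
//     }
//
//     public class PetShelter : Shelter
//     {
//         // Covariant return: the override narrows the return type to Cat.
//         public override Cat Adopt() => new Cat();
//     }
//
//     public static class Demo
//     {
//         public static void Main()
//         {
//             Shelter s = new PetShelter();
//             // Dispatch through the base-typed reference still lands on the most derived
//             // override, which is what the RunTest* methods above assert for each slot.
//             Animal a = s.Adopt();
//             Console.WriteLine(a.GetType()); // prints "Cat"
//         }
//     }
// ----------------------------------------------------------------------------------------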
call void [System.Console]System.Console::WriteLine(string) leave.s INVALID3 } catch [mscorlib]System.TypeLoadException { ldstr "Caught expected TypeLoadException:" call void [System.Console]System.Console::WriteLine(string) call void [System.Console]System.Console::WriteLine(object) leave.s INVALID3 } INVALID3: .try { call void CMain::RunTest_Invalid3() ldc.i4.0 stloc.0 ldstr "FAIL: did not catch expected TypeLoadException when loading Invalid3." call void [System.Console]System.Console::WriteLine(string) leave.s DONE } catch [mscorlib]System.TypeLoadException { ldstr "Caught expected TypeLoadException:" call void [System.Console]System.Console::WriteLine(string) call void [System.Console]System.Console::WriteLine(object) leave.s DONE } // ===================================================================================== // DONE: ldloc.0 brtrue.s PASS ldstr "Test FAILED" call void [System.Console]System.Console::WriteLine(string) ldc.i4.s 101 ret PASS: ldstr "Test PASSED" call void [System.Console]System.Console::WriteLine(string) ldc.i4.s 100 ret } .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { .maxstack 8 ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } }
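The IL above exercises covariant return types: an override may declare a more derived return type, and a callvirt made through any base (or intermediate) signature must still dispatch to the most derived override, while the Invalid* classes must fail to load. A minimal C# sketch of the same pattern follows; the type names (Animal, Dog, Shelter, DogShelter) are illustrative and do not appear in the test.

// Minimal covariant-return sketch (C# 9+); names are illustrative, not taken from the IL test.
public class Animal { }
public class Dog : Animal { }

public class Shelter
{
    public virtual Animal Adopt() => new Animal();
}

public class DogShelter : Shelter
{
    // Covariant return: overrides Shelter.Adopt but declares the more derived Dog.
    public override Dog Adopt() => new Dog();
}

public static class CovariantReturnDemo
{
    public static void Run()
    {
        Shelter s = new DogShelter();
        // Dispatch through the base signature still lands on DogShelter.Adopt.
        System.Console.WriteLine(s.Adopt().GetType()); // prints "Dog"
    }
}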
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
./src/tests/Interop/NativeLibrary/NativeLibraryToLoad/NativeLibraryToLoad.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.IO; using System.Reflection; using System.Runtime.InteropServices; public class NativeLibraryToLoad { public const string Name = "NativeLibrary"; public const string InvalidName = "DoesNotExist"; public static string GetFileName() { if (OperatingSystem.IsWindows()) return $"{Name}.dll"; if (OperatingSystem.IsLinux()) return $"lib{Name}.so"; if (OperatingSystem.IsMacOS()) return $"lib{Name}.dylib"; throw new PlatformNotSupportedException(); } public static string GetFullPath() { Assembly assembly = Assembly.GetExecutingAssembly(); string directory = Path.GetDirectoryName(assembly.Location); return Path.Combine(directory, GetFileName()); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.IO; using System.Reflection; using System.Runtime.InteropServices; public class NativeLibraryToLoad { public const string Name = "NativeLibrary"; public const string InvalidName = "DoesNotExist"; public static string GetFileName() { if (OperatingSystem.IsWindows()) return $"{Name}.dll"; if (OperatingSystem.IsLinux()) return $"lib{Name}.so"; if (OperatingSystem.IsMacOS()) return $"lib{Name}.dylib"; throw new PlatformNotSupportedException(); } public static string GetFullPath() { Assembly assembly = Assembly.GetExecutingAssembly(); string directory = Path.GetDirectoryName(assembly.Location); return Path.Combine(directory, GetFileName()); } }
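NativeLibraryToLoad only computes the platform-specific file name and full path; it does not load anything itself. The hedged usage sketch below shows how such a helper is typically paired with System.Runtime.InteropServices.NativeLibrary; the LoadByFullPath wrapper is illustrative and not part of the test tree.

// Hedged usage sketch; LoadByFullPath is an illustrative helper, not test code.
using System;
using System.Runtime.InteropServices;

public static class NativeLibraryToLoadUsage
{
    public static IntPtr LoadByFullPath()
    {
        // GetFullPath() resolves the platform-specific file name next to the test assembly.
        string path = NativeLibraryToLoad.GetFullPath();

        if (!NativeLibrary.TryLoad(path, out IntPtr handle))
            throw new DllNotFoundException($"Could not load {path}");

        return handle; // caller is responsible for NativeLibrary.Free(handle)
    }
}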
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
./src/libraries/System.Security.Cryptography.OpenSsl/tests/EcDsaOpenSslTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using System.Security.Cryptography.EcDsa.Tests; using Test.Cryptography; using Xunit; namespace System.Security.Cryptography.EcDsa.OpenSsl.Tests { public class EcDsaOpenSslTests : ECDsaTestsBase { [Fact] public void DefaultCtor() { using (ECDsaOpenSsl e = new ECDsaOpenSsl()) { int keySize = e.KeySize; Assert.Equal(521, keySize); e.Exercise(); } } [Fact] public void Ctor256() { int expectedKeySize = 256; using (ECDsaOpenSsl e = new ECDsaOpenSsl(expectedKeySize)) { int keySize = e.KeySize; Assert.Equal(expectedKeySize, keySize); e.Exercise(); } } [Fact] public void Ctor384() { int expectedKeySize = 384; using (ECDsaOpenSsl e = new ECDsaOpenSsl(expectedKeySize)) { int keySize = e.KeySize; Assert.Equal(expectedKeySize, keySize); e.Exercise(); } } [Fact] public void Ctor521() { int expectedKeySize = 521; using (ECDsaOpenSsl e = new ECDsaOpenSsl(expectedKeySize)) { int keySize = e.KeySize; Assert.Equal(expectedKeySize, keySize); e.Exercise(); } } [ConditionalFact(nameof(ECDsa224Available))] public void CtorHandle224() { IntPtr ecKey = Interop.Crypto.EcKeyCreateByOid(ECDSA_P224_OID_VALUE); Assert.NotEqual(IntPtr.Zero, ecKey); int success = Interop.Crypto.EcKeyGenerateKey(ecKey); Assert.NotEqual(0, success); using (ECDsaOpenSsl e = new ECDsaOpenSsl(ecKey)) { int keySize = e.KeySize; Assert.Equal(224, keySize); e.Exercise(); } Interop.Crypto.EcKeyDestroy(ecKey); } [Fact] public void CtorHandle384() { IntPtr ecKey = Interop.Crypto.EcKeyCreateByOid(ECDSA_P384_OID_VALUE); Assert.NotEqual(IntPtr.Zero, ecKey); int success = Interop.Crypto.EcKeyGenerateKey(ecKey); Assert.NotEqual(0, success); using (ECDsaOpenSsl e = new ECDsaOpenSsl(ecKey)) { int keySize = e.KeySize; Assert.Equal(384, keySize); e.Exercise(); } Interop.Crypto.EcKeyDestroy(ecKey); } [Fact] public void CtorHandle521() { IntPtr ecKey = Interop.Crypto.EcKeyCreateByOid(ECDSA_P521_OID_VALUE); Assert.NotEqual(IntPtr.Zero, ecKey); int success = Interop.Crypto.EcKeyGenerateKey(ecKey); Assert.NotEqual(0, success); using (ECDsaOpenSsl e = new ECDsaOpenSsl(ecKey)) { int keySize = e.KeySize; Assert.Equal(521, keySize); e.Exercise(); } Interop.Crypto.EcKeyDestroy(ecKey); } [Fact] public void CtorHandleDuplicate() { IntPtr ecKey = Interop.Crypto.EcKeyCreateByOid(ECDSA_P521_OID_VALUE); Assert.NotEqual(IntPtr.Zero, ecKey); int success = Interop.Crypto.EcKeyGenerateKey(ecKey); Assert.NotEqual(0, success); using (ECDsaOpenSsl e = new ECDsaOpenSsl(ecKey)) { // Make sure ECDsaOpenSsl did its own ref-count bump. 
Interop.Crypto.EcKeyDestroy(ecKey); int keySize = e.KeySize; Assert.Equal(521, keySize); e.Exercise(); } } [Fact] public void KeySizePropWithExercise() { using (ECDsaOpenSsl e = new ECDsaOpenSsl()) { e.KeySize = 384; Assert.Equal(384, e.KeySize); e.Exercise(); ECParameters p384 = e.ExportParameters(false); Assert.Equal(ECCurve.ECCurveType.Named, p384.Curve.CurveType); e.KeySize = 521; Assert.Equal(521, e.KeySize); e.Exercise(); ECParameters p521 = e.ExportParameters(false); Assert.Equal(ECCurve.ECCurveType.Named, p521.Curve.CurveType); // ensure the key was regenerated Assert.NotEqual(p384.Curve.Oid.Value, p521.Curve.Oid.Value); } } [Fact] public void VerifyDuplicateKey_ValidHandle() { byte[] data = ByteUtils.RepeatByte(0x71, 11); using (ECDsaOpenSsl first = new ECDsaOpenSsl()) using (SafeEvpPKeyHandle firstHandle = first.DuplicateKeyHandle()) { using (ECDsa second = new ECDsaOpenSsl(firstHandle)) { byte[] signed = second.SignData(data, HashAlgorithmName.SHA512); Assert.True(first.VerifyData(data, signed, HashAlgorithmName.SHA512)); } } } [Fact] public void VerifyDuplicateKey_DistinctHandles() { using (ECDsaOpenSsl first = new ECDsaOpenSsl()) using (SafeEvpPKeyHandle firstHandle = first.DuplicateKeyHandle()) using (SafeEvpPKeyHandle firstHandle2 = first.DuplicateKeyHandle()) { Assert.NotSame(firstHandle, firstHandle2); } } [Fact] public void VerifyDuplicateKey_RefCounts() { byte[] data = ByteUtils.RepeatByte(0x74, 11); byte[] signature; ECDsa second; using (ECDsaOpenSsl first = new ECDsaOpenSsl()) using (SafeEvpPKeyHandle firstHandle = first.DuplicateKeyHandle()) { signature = first.SignData(data, HashAlgorithmName.SHA384); second = new ECDsaOpenSsl(firstHandle); } // Now show that second still works, despite first and firstHandle being Disposed. using (second) { Assert.True(second.VerifyData(data, signature, HashAlgorithmName.SHA384)); } } [Fact] public void VerifyDuplicateKey_NullHandle() { SafeEvpPKeyHandle pkey = null; Assert.Throws<ArgumentNullException>(() => new ECDsaOpenSsl(pkey)); } [Fact] public void VerifyDuplicateKey_InvalidHandle() { using (ECDsaOpenSsl ecdsa = new ECDsaOpenSsl()) { SafeEvpPKeyHandle pkey = ecdsa.DuplicateKeyHandle(); using (pkey) { } AssertExtensions.Throws<ArgumentException>("pkeyHandle", () => new ECDsaOpenSsl(pkey)); } } [Fact] public void VerifyDuplicateKey_NeverValidHandle() { using (SafeEvpPKeyHandle pkey = new SafeEvpPKeyHandle(IntPtr.Zero, false)) { AssertExtensions.Throws<ArgumentException>("pkeyHandle", () => new ECDsaOpenSsl(pkey)); } } [Fact] public void VerifyDuplicateKey_RsaHandle() { using (RSAOpenSsl rsa = new RSAOpenSsl()) using (SafeEvpPKeyHandle pkey = rsa.DuplicateKeyHandle()) { Assert.ThrowsAny<CryptographicException>(() => new ECDsaOpenSsl(pkey)); } } [Fact] public void LookupCurveByOidValue() { ECDsaOpenSsl ec = null; ec = new ECDsaOpenSsl(ECCurve.CreateFromValue(ECDSA_P256_OID_VALUE)); // Same as nistP256 ECParameters param = ec.ExportParameters(false); param.Validate(); Assert.Equal(256, ec.KeySize); Assert.True(param.Curve.IsNamed); Assert.Equal("ECDSA_P256", param.Curve.Oid.FriendlyName); Assert.Equal(ECDSA_P256_OID_VALUE, param.Curve.Oid.Value); } [Theory] [InlineData("ECDSA_P521")] [InlineData("ECDSA_P384")] [InlineData("ECDSA_P256")] public void LookupCurveByOidWindowsFriendlyName(string friendlyName) { ECDsaOpenSsl ec = new ECDsaOpenSsl(ECCurve.CreateFromFriendlyName(friendlyName)); ECParameters param = ec.ExportParameters(false); param.Validate(); } [Fact] public void LookupCurveByOidWithInvalidThrowsPlatformNotSupported() { 
Assert.Throws<PlatformNotSupportedException>(() => { new ECDsaOpenSsl(ECCurve.CreateFromFriendlyName("Invalid")); }); } [Fact] public void LookupCurveByOidFriendlyName() { ECDsaOpenSsl ec = null; // prime256v1 is alias for nistP256 for OpenSsl ec = new ECDsaOpenSsl(ECCurve.CreateFromFriendlyName("prime256v1")); ECParameters param = ec.ExportParameters(false); param.Validate(); Assert.Equal(256, ec.KeySize); Assert.True(param.Curve.IsNamed); Assert.Equal("ECDSA_P256", param.Curve.Oid.FriendlyName); // OpenSsl maps prime256v1 to ECDSA_P256 Assert.Equal(ECDSA_P256_OID_VALUE, param.Curve.Oid.Value); // secp521r1 is same as nistP521; note Windows uses secP521r1 (uppercase P) ec = new ECDsaOpenSsl(ECCurve.CreateFromFriendlyName("secp521r1")); param = ec.ExportParameters(false); param.Validate(); Assert.Equal(521, ec.KeySize); Assert.True(param.Curve.IsNamed); Assert.Equal("ECDSA_P521", param.Curve.Oid.FriendlyName); // OpenSsl maps secp521r1 to ECDSA_P521 Assert.Equal(ECDSA_P521_OID_VALUE, param.Curve.Oid.Value); } } } internal static partial class Interop { internal static partial class Crypto { [DllImport(Libraries.CryptoNative, EntryPoint = "CryptoNative_EcKeyCreateByOid")] internal static extern IntPtr EcKeyCreateByOid(string oid); [DllImport(Libraries.CryptoNative, EntryPoint = "CryptoNative_EcKeyGenerateKey")] internal static extern int EcKeyGenerateKey(IntPtr ecKey); [DllImport(Libraries.CryptoNative, EntryPoint = "CryptoNative_EcKeyDestroy")] internal static extern void EcKeyDestroy(IntPtr r); [DllImport(Libraries.CryptoNative, EntryPoint = "CryptoNative_OpenSslVersionNumber")] internal static extern uint OpenSslVersionNumber(); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using System.Security.Cryptography.EcDsa.Tests; using Test.Cryptography; using Xunit; namespace System.Security.Cryptography.EcDsa.OpenSsl.Tests { public class EcDsaOpenSslTests : ECDsaTestsBase { [Fact] public void DefaultCtor() { using (ECDsaOpenSsl e = new ECDsaOpenSsl()) { int keySize = e.KeySize; Assert.Equal(521, keySize); e.Exercise(); } } [Fact] public void Ctor256() { int expectedKeySize = 256; using (ECDsaOpenSsl e = new ECDsaOpenSsl(expectedKeySize)) { int keySize = e.KeySize; Assert.Equal(expectedKeySize, keySize); e.Exercise(); } } [Fact] public void Ctor384() { int expectedKeySize = 384; using (ECDsaOpenSsl e = new ECDsaOpenSsl(expectedKeySize)) { int keySize = e.KeySize; Assert.Equal(expectedKeySize, keySize); e.Exercise(); } } [Fact] public void Ctor521() { int expectedKeySize = 521; using (ECDsaOpenSsl e = new ECDsaOpenSsl(expectedKeySize)) { int keySize = e.KeySize; Assert.Equal(expectedKeySize, keySize); e.Exercise(); } } [ConditionalFact(nameof(ECDsa224Available))] public void CtorHandle224() { IntPtr ecKey = Interop.Crypto.EcKeyCreateByOid(ECDSA_P224_OID_VALUE); Assert.NotEqual(IntPtr.Zero, ecKey); int success = Interop.Crypto.EcKeyGenerateKey(ecKey); Assert.NotEqual(0, success); using (ECDsaOpenSsl e = new ECDsaOpenSsl(ecKey)) { int keySize = e.KeySize; Assert.Equal(224, keySize); e.Exercise(); } Interop.Crypto.EcKeyDestroy(ecKey); } [Fact] public void CtorHandle384() { IntPtr ecKey = Interop.Crypto.EcKeyCreateByOid(ECDSA_P384_OID_VALUE); Assert.NotEqual(IntPtr.Zero, ecKey); int success = Interop.Crypto.EcKeyGenerateKey(ecKey); Assert.NotEqual(0, success); using (ECDsaOpenSsl e = new ECDsaOpenSsl(ecKey)) { int keySize = e.KeySize; Assert.Equal(384, keySize); e.Exercise(); } Interop.Crypto.EcKeyDestroy(ecKey); } [Fact] public void CtorHandle521() { IntPtr ecKey = Interop.Crypto.EcKeyCreateByOid(ECDSA_P521_OID_VALUE); Assert.NotEqual(IntPtr.Zero, ecKey); int success = Interop.Crypto.EcKeyGenerateKey(ecKey); Assert.NotEqual(0, success); using (ECDsaOpenSsl e = new ECDsaOpenSsl(ecKey)) { int keySize = e.KeySize; Assert.Equal(521, keySize); e.Exercise(); } Interop.Crypto.EcKeyDestroy(ecKey); } [Fact] public void CtorHandleDuplicate() { IntPtr ecKey = Interop.Crypto.EcKeyCreateByOid(ECDSA_P521_OID_VALUE); Assert.NotEqual(IntPtr.Zero, ecKey); int success = Interop.Crypto.EcKeyGenerateKey(ecKey); Assert.NotEqual(0, success); using (ECDsaOpenSsl e = new ECDsaOpenSsl(ecKey)) { // Make sure ECDsaOpenSsl did its own ref-count bump. 
Interop.Crypto.EcKeyDestroy(ecKey); int keySize = e.KeySize; Assert.Equal(521, keySize); e.Exercise(); } } [Fact] public void KeySizePropWithExercise() { using (ECDsaOpenSsl e = new ECDsaOpenSsl()) { e.KeySize = 384; Assert.Equal(384, e.KeySize); e.Exercise(); ECParameters p384 = e.ExportParameters(false); Assert.Equal(ECCurve.ECCurveType.Named, p384.Curve.CurveType); e.KeySize = 521; Assert.Equal(521, e.KeySize); e.Exercise(); ECParameters p521 = e.ExportParameters(false); Assert.Equal(ECCurve.ECCurveType.Named, p521.Curve.CurveType); // ensure the key was regenerated Assert.NotEqual(p384.Curve.Oid.Value, p521.Curve.Oid.Value); } } [Fact] public void VerifyDuplicateKey_ValidHandle() { byte[] data = ByteUtils.RepeatByte(0x71, 11); using (ECDsaOpenSsl first = new ECDsaOpenSsl()) using (SafeEvpPKeyHandle firstHandle = first.DuplicateKeyHandle()) { using (ECDsa second = new ECDsaOpenSsl(firstHandle)) { byte[] signed = second.SignData(data, HashAlgorithmName.SHA512); Assert.True(first.VerifyData(data, signed, HashAlgorithmName.SHA512)); } } } [Fact] public void VerifyDuplicateKey_DistinctHandles() { using (ECDsaOpenSsl first = new ECDsaOpenSsl()) using (SafeEvpPKeyHandle firstHandle = first.DuplicateKeyHandle()) using (SafeEvpPKeyHandle firstHandle2 = first.DuplicateKeyHandle()) { Assert.NotSame(firstHandle, firstHandle2); } } [Fact] public void VerifyDuplicateKey_RefCounts() { byte[] data = ByteUtils.RepeatByte(0x74, 11); byte[] signature; ECDsa second; using (ECDsaOpenSsl first = new ECDsaOpenSsl()) using (SafeEvpPKeyHandle firstHandle = first.DuplicateKeyHandle()) { signature = first.SignData(data, HashAlgorithmName.SHA384); second = new ECDsaOpenSsl(firstHandle); } // Now show that second still works, despite first and firstHandle being Disposed. using (second) { Assert.True(second.VerifyData(data, signature, HashAlgorithmName.SHA384)); } } [Fact] public void VerifyDuplicateKey_NullHandle() { SafeEvpPKeyHandle pkey = null; Assert.Throws<ArgumentNullException>(() => new ECDsaOpenSsl(pkey)); } [Fact] public void VerifyDuplicateKey_InvalidHandle() { using (ECDsaOpenSsl ecdsa = new ECDsaOpenSsl()) { SafeEvpPKeyHandle pkey = ecdsa.DuplicateKeyHandle(); using (pkey) { } AssertExtensions.Throws<ArgumentException>("pkeyHandle", () => new ECDsaOpenSsl(pkey)); } } [Fact] public void VerifyDuplicateKey_NeverValidHandle() { using (SafeEvpPKeyHandle pkey = new SafeEvpPKeyHandle(IntPtr.Zero, false)) { AssertExtensions.Throws<ArgumentException>("pkeyHandle", () => new ECDsaOpenSsl(pkey)); } } [Fact] public void VerifyDuplicateKey_RsaHandle() { using (RSAOpenSsl rsa = new RSAOpenSsl()) using (SafeEvpPKeyHandle pkey = rsa.DuplicateKeyHandle()) { Assert.ThrowsAny<CryptographicException>(() => new ECDsaOpenSsl(pkey)); } } [Fact] public void LookupCurveByOidValue() { ECDsaOpenSsl ec = null; ec = new ECDsaOpenSsl(ECCurve.CreateFromValue(ECDSA_P256_OID_VALUE)); // Same as nistP256 ECParameters param = ec.ExportParameters(false); param.Validate(); Assert.Equal(256, ec.KeySize); Assert.True(param.Curve.IsNamed); Assert.Equal("ECDSA_P256", param.Curve.Oid.FriendlyName); Assert.Equal(ECDSA_P256_OID_VALUE, param.Curve.Oid.Value); } [Theory] [InlineData("ECDSA_P521")] [InlineData("ECDSA_P384")] [InlineData("ECDSA_P256")] public void LookupCurveByOidWindowsFriendlyName(string friendlyName) { ECDsaOpenSsl ec = new ECDsaOpenSsl(ECCurve.CreateFromFriendlyName(friendlyName)); ECParameters param = ec.ExportParameters(false); param.Validate(); } [Fact] public void LookupCurveByOidWithInvalidThrowsPlatformNotSupported() { 
Assert.Throws<PlatformNotSupportedException>(() => { new ECDsaOpenSsl(ECCurve.CreateFromFriendlyName("Invalid")); }); } [Fact] public void LookupCurveByOidFriendlyName() { ECDsaOpenSsl ec = null; // prime256v1 is alias for nistP256 for OpenSsl ec = new ECDsaOpenSsl(ECCurve.CreateFromFriendlyName("prime256v1")); ECParameters param = ec.ExportParameters(false); param.Validate(); Assert.Equal(256, ec.KeySize); Assert.True(param.Curve.IsNamed); Assert.Equal("ECDSA_P256", param.Curve.Oid.FriendlyName); // OpenSsl maps prime256v1 to ECDSA_P256 Assert.Equal(ECDSA_P256_OID_VALUE, param.Curve.Oid.Value); // secp521r1 is same as nistP521; note Windows uses secP521r1 (uppercase P) ec = new ECDsaOpenSsl(ECCurve.CreateFromFriendlyName("secp521r1")); param = ec.ExportParameters(false); param.Validate(); Assert.Equal(521, ec.KeySize); Assert.True(param.Curve.IsNamed); Assert.Equal("ECDSA_P521", param.Curve.Oid.FriendlyName); // OpenSsl maps secp521r1 to ECDSA_P521 Assert.Equal(ECDSA_P521_OID_VALUE, param.Curve.Oid.Value); } } } internal static partial class Interop { internal static partial class Crypto { [DllImport(Libraries.CryptoNative, EntryPoint = "CryptoNative_EcKeyCreateByOid")] internal static extern IntPtr EcKeyCreateByOid(string oid); [DllImport(Libraries.CryptoNative, EntryPoint = "CryptoNative_EcKeyGenerateKey")] internal static extern int EcKeyGenerateKey(IntPtr ecKey); [DllImport(Libraries.CryptoNative, EntryPoint = "CryptoNative_EcKeyDestroy")] internal static extern void EcKeyDestroy(IntPtr r); [DllImport(Libraries.CryptoNative, EntryPoint = "CryptoNative_OpenSslVersionNumber")] internal static extern uint OpenSslVersionNumber(); } }
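The ref-counting behaviour that CtorHandleDuplicate and VerifyDuplicateKey_RefCounts rely on can be summarised in a standalone sketch: the duplicated SafeEvpPKeyHandle carries its own reference on the underlying EVP_PKEY, so the second ECDsa instance stays usable after the first is disposed. This is an assumption-laden illustration (OpenSSL-backed Unix build; the class name DuplicateKeyHandleDemo is illustrative), not additional test code.

// Hedged sketch of the DuplicateKeyHandle pattern; assumes an OpenSSL-backed build.
using System;
using System.Security.Cryptography;

public static class DuplicateKeyHandleDemo
{
    public static bool SignWithOriginalVerifyWithDuplicate(byte[] data)
    {
        byte[] signature;
        ECDsa second;

        using (ECDsaOpenSsl first = new ECDsaOpenSsl())
        using (SafeEvpPKeyHandle handle = first.DuplicateKeyHandle())
        {
            signature = first.SignData(data, HashAlgorithmName.SHA256);
            second = new ECDsaOpenSsl(handle);
        }

        using (second)
        {
            // Still valid: the duplicated handle holds its own EVP_PKEY reference,
            // so disposing "first" and "handle" does not invalidate "second".
            return second.VerifyData(data, signature, HashAlgorithmName.SHA256);
        }
    }
}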
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
./src/tests/JIT/HardwareIntrinsics/X86/Sse3/AddSubtract.Single.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void AddSubtractSingle() { var test = new AlternatingBinaryOpTest__AddSubtractSingle(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Sse.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Sse.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); if (Sse.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Sse.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (Sse.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (Sse.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (Sse.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (Sse.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class AlternatingBinaryOpTest__AddSubtractSingle { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Single[] inArray1, Single[] inArray2, Single[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * 
Unsafe.SizeOf<Single>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Single>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Single>(); if ((alignment != 32 && alignment != 16) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Single, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Single, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Single> _fld1; public Vector128<Single> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref testStruct._fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref testStruct._fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); return testStruct; } public void RunStructFldScenario(AlternatingBinaryOpTest__AddSubtractSingle testClass) { var result = Sse3.AddSubtract(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(AlternatingBinaryOpTest__AddSubtractSingle testClass) { fixed (Vector128<Single>* pFld1 = &_fld1) fixed (Vector128<Single>* pFld2 = &_fld2) { var result = Sse3.AddSubtract( Sse.LoadVector128((Single*)(pFld1)), Sse.LoadVector128((Single*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single); private static Single[] _data1 = new Single[Op1ElementCount]; private static Single[] _data2 = new Single[Op2ElementCount]; private static Vector128<Single> _clsVar1; private static Vector128<Single> _clsVar2; private Vector128<Single> _fld1; private 
Vector128<Single> _fld2; private DataTable _dataTable; static AlternatingBinaryOpTest__AddSubtractSingle() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _clsVar1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _clsVar2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); } public AlternatingBinaryOpTest__AddSubtractSingle() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } _dataTable = new DataTable(_data1, _data2, new Single[RetElementCount], LargestVectorSize); } public bool IsSupported => Sse3.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Sse3.AddSubtract( Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Sse3.AddSubtract( Sse.LoadVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadVector128((Single*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Sse3.AddSubtract( Sse.LoadAlignedVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadAlignedVector128((Single*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Sse3).GetMethod(nameof(Sse3.AddSubtract), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Single>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Sse3).GetMethod(nameof(Sse3.AddSubtract), new Type[] { 
typeof(Vector128<Single>), typeof(Vector128<Single>) }) .Invoke(null, new object[] { Sse.LoadVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadVector128((Single*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Single>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Sse3).GetMethod(nameof(Sse3.AddSubtract), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) }) .Invoke(null, new object[] { Sse.LoadAlignedVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadAlignedVector128((Single*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Single>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Sse3.AddSubtract( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Single>* pClsVar1 = &_clsVar1) fixed (Vector128<Single>* pClsVar2 = &_clsVar2) { var result = Sse3.AddSubtract( Sse.LoadVector128((Single*)(pClsVar1)), Sse.LoadVector128((Single*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr); var result = Sse3.AddSubtract(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = Sse.LoadVector128((Single*)(_dataTable.inArray1Ptr)); var op2 = Sse.LoadVector128((Single*)(_dataTable.inArray2Ptr)); var result = Sse3.AddSubtract(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var op1 = Sse.LoadAlignedVector128((Single*)(_dataTable.inArray1Ptr)); var op2 = Sse.LoadAlignedVector128((Single*)(_dataTable.inArray2Ptr)); var result = Sse3.AddSubtract(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new AlternatingBinaryOpTest__AddSubtractSingle(); var result = Sse3.AddSubtract(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new AlternatingBinaryOpTest__AddSubtractSingle(); fixed (Vector128<Single>* pFld1 = &test._fld1) fixed (Vector128<Single>* pFld2 = &test._fld2) { var result = Sse3.AddSubtract( Sse.LoadVector128((Single*)(pFld1)), Sse.LoadVector128((Single*)(pFld2)) ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Sse3.AddSubtract(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Single>* pFld1 = &_fld1) fixed (Vector128<Single>* pFld2 = &_fld2) { var result = Sse3.AddSubtract( Sse.LoadVector128((Single*)(pFld1)), Sse.LoadVector128((Single*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Sse3.AddSubtract(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = Sse3.AddSubtract( Sse.LoadVector128((Single*)(&test._fld1)), Sse.LoadVector128((Single*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Single> op1, Vector128<Single> op2, void* result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Single[] outArray = new Single[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Single>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Single[] outArray = new Single[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Single>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Single[] left, Single[] right, 
Single[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i += 2) { if (BitConverter.SingleToInt32Bits(result[i]) != BitConverter.SingleToInt32Bits(left[i] - right[i])) { succeeded = false; break; } if (BitConverter.SingleToInt32Bits(result[i + 1]) != BitConverter.SingleToInt32Bits(left[i + 1] + right[i + 1])) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Sse3)}.{nameof(Sse3.AddSubtract)}<Single>(Vector128<Single>, Vector128<Single>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void AddSubtractSingle() { var test = new AlternatingBinaryOpTest__AddSubtractSingle(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Sse.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Sse.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); if (Sse.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Sse.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (Sse.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (Sse.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (Sse.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (Sse.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class AlternatingBinaryOpTest__AddSubtractSingle { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Single[] inArray1, Single[] inArray2, Single[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * 
Unsafe.SizeOf<Single>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Single>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Single>(); if ((alignment != 32 && alignment != 16) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Single, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Single, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Single> _fld1; public Vector128<Single> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref testStruct._fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref testStruct._fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); return testStruct; } public void RunStructFldScenario(AlternatingBinaryOpTest__AddSubtractSingle testClass) { var result = Sse3.AddSubtract(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(AlternatingBinaryOpTest__AddSubtractSingle testClass) { fixed (Vector128<Single>* pFld1 = &_fld1) fixed (Vector128<Single>* pFld2 = &_fld2) { var result = Sse3.AddSubtract( Sse.LoadVector128((Single*)(pFld1)), Sse.LoadVector128((Single*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single); private static Single[] _data1 = new Single[Op1ElementCount]; private static Single[] _data2 = new Single[Op2ElementCount]; private static Vector128<Single> _clsVar1; private static Vector128<Single> _clsVar2; private Vector128<Single> _fld1; private 
Vector128<Single> _fld2; private DataTable _dataTable; static AlternatingBinaryOpTest__AddSubtractSingle() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _clsVar1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _clsVar2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); } public AlternatingBinaryOpTest__AddSubtractSingle() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); } _dataTable = new DataTable(_data1, _data2, new Single[RetElementCount], LargestVectorSize); } public bool IsSupported => Sse3.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Sse3.AddSubtract( Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Sse3.AddSubtract( Sse.LoadVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadVector128((Single*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Sse3.AddSubtract( Sse.LoadAlignedVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadAlignedVector128((Single*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Sse3).GetMethod(nameof(Sse3.AddSubtract), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Single>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Sse3).GetMethod(nameof(Sse3.AddSubtract), new Type[] { 
typeof(Vector128<Single>), typeof(Vector128<Single>) }) .Invoke(null, new object[] { Sse.LoadVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadVector128((Single*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Single>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Sse3).GetMethod(nameof(Sse3.AddSubtract), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) }) .Invoke(null, new object[] { Sse.LoadAlignedVector128((Single*)(_dataTable.inArray1Ptr)), Sse.LoadAlignedVector128((Single*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Single>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Sse3.AddSubtract( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Single>* pClsVar1 = &_clsVar1) fixed (Vector128<Single>* pClsVar2 = &_clsVar2) { var result = Sse3.AddSubtract( Sse.LoadVector128((Single*)(pClsVar1)), Sse.LoadVector128((Single*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr); var result = Sse3.AddSubtract(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = Sse.LoadVector128((Single*)(_dataTable.inArray1Ptr)); var op2 = Sse.LoadVector128((Single*)(_dataTable.inArray2Ptr)); var result = Sse3.AddSubtract(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var op1 = Sse.LoadAlignedVector128((Single*)(_dataTable.inArray1Ptr)); var op2 = Sse.LoadAlignedVector128((Single*)(_dataTable.inArray2Ptr)); var result = Sse3.AddSubtract(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new AlternatingBinaryOpTest__AddSubtractSingle(); var result = Sse3.AddSubtract(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new AlternatingBinaryOpTest__AddSubtractSingle(); fixed (Vector128<Single>* pFld1 = &test._fld1) fixed (Vector128<Single>* pFld2 = &test._fld2) { var result = Sse3.AddSubtract( Sse.LoadVector128((Single*)(pFld1)), Sse.LoadVector128((Single*)(pFld2)) ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Sse3.AddSubtract(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Single>* pFld1 = &_fld1) fixed (Vector128<Single>* pFld2 = &_fld2) { var result = Sse3.AddSubtract( Sse.LoadVector128((Single*)(pFld1)), Sse.LoadVector128((Single*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Sse3.AddSubtract(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = Sse3.AddSubtract( Sse.LoadVector128((Single*)(&test._fld1)), Sse.LoadVector128((Single*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Single> op1, Vector128<Single> op2, void* result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Single[] outArray = new Single[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Single>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Single[] inArray1 = new Single[Op1ElementCount]; Single[] inArray2 = new Single[Op2ElementCount]; Single[] outArray = new Single[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Single>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Single>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Single[] left, Single[] right, 
Single[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i += 2) { if (BitConverter.SingleToInt32Bits(result[i]) != BitConverter.SingleToInt32Bits(left[i] - right[i])) { succeeded = false; break; } if (BitConverter.SingleToInt32Bits(result[i + 1]) != BitConverter.SingleToInt32Bits(left[i + 1] + right[i + 1])) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Sse3)}.{nameof(Sse3.AddSubtract)}<Single>(Vector128<Single>, Vector128<Single>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
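The validator above accepts a result only when even lanes equal left[i] - right[i] and odd lanes equal left[i + 1] + right[i + 1], which is the lane behaviour Sse3.AddSubtract exposes. A minimal standalone sketch of that behaviour, assuming an SSE3-capable x86/x64 host; the vector values are illustrative only:

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

public static class AddSubtractSketch
{
    public static void Main()
    {
        if (!Sse3.IsSupported)
        {
            Console.WriteLine("SSE3 is not supported on this host.");
            return;
        }

        Vector128<float> left  = Vector128.Create(10f, 20f, 30f, 40f);
        Vector128<float> right = Vector128.Create(1f, 2f, 3f, 4f);

        Vector128<float> result = Sse3.AddSubtract(left, right);

        // Even lanes subtract, odd lanes add: expected 9, 22, 27, 44.
        for (int i = 0; i < Vector128<float>.Count; i++)
        {
            float expected = (i % 2 == 0)
                ? left.GetElement(i) - right.GetElement(i)
                : left.GetElement(i) + right.GetElement(i);
            Console.WriteLine($"lane {i}: {result.GetElement(i)} (expected {expected})");
        }
    }
}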
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
./src/libraries/System.Security.Cryptography.Xml/src/System/Security/Cryptography/Xml/CanonicalXmlElement.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Xml; using System.Text; using System.Collections; namespace System.Security.Cryptography.Xml { // the class that provides node subset state and canonicalization function to XmlElement internal sealed class CanonicalXmlElement : XmlElement, ICanonicalizableNode { private bool _isInNodeSet; public CanonicalXmlElement(string prefix, string localName, string namespaceURI, XmlDocument doc, bool defaultNodeSetInclusionState) : base(prefix, localName, namespaceURI, doc) { _isInNodeSet = defaultNodeSetInclusionState; } public bool IsInNodeSet { get { return _isInNodeSet; } set { _isInNodeSet = value; } } public void Write(StringBuilder strBuilder, DocPosition docPos, AncestralNamespaceContextManager anc) { Hashtable nsLocallyDeclared = new Hashtable(); SortedList nsListToRender = new SortedList(new NamespaceSortOrder()); SortedList attrListToRender = new SortedList(new AttributeSortOrder()); XmlAttributeCollection attrList = Attributes; if (attrList != null) { foreach (XmlAttribute attr in attrList) { if (((CanonicalXmlAttribute)attr).IsInNodeSet || Utils.IsNamespaceNode(attr) || Utils.IsXmlNamespaceNode(attr)) { if (Utils.IsNamespaceNode(attr)) { anc.TrackNamespaceNode(attr, nsListToRender, nsLocallyDeclared); } else if (Utils.IsXmlNamespaceNode(attr)) { anc.TrackXmlNamespaceNode(attr, nsListToRender, attrListToRender, nsLocallyDeclared); } else if (IsInNodeSet) { attrListToRender.Add(attr, null); } } } } if (!Utils.IsCommittedNamespace(this, Prefix, NamespaceURI)) { string name = ((Prefix.Length > 0) ? "xmlns" + ":" + Prefix : "xmlns"); XmlAttribute nsattrib = (XmlAttribute)OwnerDocument.CreateAttribute(name); nsattrib.Value = NamespaceURI; anc.TrackNamespaceNode(nsattrib, nsListToRender, nsLocallyDeclared); } if (IsInNodeSet) { anc.GetNamespacesToRender(this, attrListToRender, nsListToRender, nsLocallyDeclared); strBuilder.Append('<').Append(Name); foreach (object attr in nsListToRender.GetKeyList()) { (attr as CanonicalXmlAttribute).Write(strBuilder, docPos, anc); } foreach (object attr in attrListToRender.GetKeyList()) { (attr as CanonicalXmlAttribute).Write(strBuilder, docPos, anc); } strBuilder.Append('>'); } anc.EnterElementContext(); anc.LoadUnrenderedNamespaces(nsLocallyDeclared); anc.LoadRenderedNamespaces(nsListToRender); XmlNodeList childNodes = ChildNodes; foreach (XmlNode childNode in childNodes) { CanonicalizationDispatcher.Write(childNode, strBuilder, docPos, anc); } anc.ExitElementContext(); if (IsInNodeSet) { strBuilder.Append($"</{Name}>"); } } public void WriteHash(HashAlgorithm hash, DocPosition docPos, AncestralNamespaceContextManager anc) { Hashtable nsLocallyDeclared = new Hashtable(); SortedList nsListToRender = new SortedList(new NamespaceSortOrder()); SortedList attrListToRender = new SortedList(new AttributeSortOrder()); UTF8Encoding utf8 = new UTF8Encoding(false); byte[] rgbData; XmlAttributeCollection attrList = Attributes; if (attrList != null) { foreach (XmlAttribute attr in attrList) { if (((CanonicalXmlAttribute)attr).IsInNodeSet || Utils.IsNamespaceNode(attr) || Utils.IsXmlNamespaceNode(attr)) { if (Utils.IsNamespaceNode(attr)) { anc.TrackNamespaceNode(attr, nsListToRender, nsLocallyDeclared); } else if (Utils.IsXmlNamespaceNode(attr)) { anc.TrackXmlNamespaceNode(attr, nsListToRender, attrListToRender, nsLocallyDeclared); } else if (IsInNodeSet) { attrListToRender.Add(attr, null); } } } } if 
(!Utils.IsCommittedNamespace(this, Prefix, NamespaceURI)) { string name = ((Prefix.Length > 0) ? "xmlns" + ":" + Prefix : "xmlns"); XmlAttribute nsattrib = (XmlAttribute)OwnerDocument.CreateAttribute(name); nsattrib.Value = NamespaceURI; anc.TrackNamespaceNode(nsattrib, nsListToRender, nsLocallyDeclared); } if (IsInNodeSet) { anc.GetNamespacesToRender(this, attrListToRender, nsListToRender, nsLocallyDeclared); rgbData = utf8.GetBytes("<" + Name); hash.TransformBlock(rgbData, 0, rgbData.Length, rgbData, 0); foreach (object attr in nsListToRender.GetKeyList()) { (attr as CanonicalXmlAttribute).WriteHash(hash, docPos, anc); } foreach (object attr in attrListToRender.GetKeyList()) { (attr as CanonicalXmlAttribute).WriteHash(hash, docPos, anc); } rgbData = utf8.GetBytes(">"); hash.TransformBlock(rgbData, 0, rgbData.Length, rgbData, 0); } anc.EnterElementContext(); anc.LoadUnrenderedNamespaces(nsLocallyDeclared); anc.LoadRenderedNamespaces(nsListToRender); XmlNodeList childNodes = ChildNodes; foreach (XmlNode childNode in childNodes) { CanonicalizationDispatcher.WriteHash(childNode, hash, docPos, anc); } anc.ExitElementContext(); if (IsInNodeSet) { rgbData = utf8.GetBytes("</" + Name + ">"); hash.TransformBlock(rgbData, 0, rgbData.Length, rgbData, 0); } } } }
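The WriteHash path above streams UTF-8 fragments (the "<" + Name prefix, attributes, ">", the children, then "</" + Name + ">") into a HashAlgorithm via TransformBlock instead of materialising the canonical string. A minimal isolated sketch of that streaming pattern, assuming SHA-256; the fragment strings are hypothetical stand-ins, not taken from the canonicalizer:

using System;
using System.Security.Cryptography;
using System.Text;

public static class StreamingHashSketch
{
    public static void Main()
    {
        using HashAlgorithm hash = SHA256.Create();
        UTF8Encoding utf8 = new UTF8Encoding(false); // no BOM, as in the canonicalizer

        // Hypothetical fragments standing in for "<" + Name, child content, and "</" + Name + ">".
        foreach (string fragment in new[] { "<root>", "text", "</root>" })
        {
            byte[] rgbData = utf8.GetBytes(fragment);
            hash.TransformBlock(rgbData, 0, rgbData.Length, rgbData, 0);
        }

        hash.TransformFinalBlock(Array.Empty<byte>(), 0, 0);
        Console.WriteLine(Convert.ToHexString(hash.Hash));
    }
}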
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Xml; using System.Text; using System.Collections; namespace System.Security.Cryptography.Xml { // the class that provides node subset state and canonicalization function to XmlElement internal sealed class CanonicalXmlElement : XmlElement, ICanonicalizableNode { private bool _isInNodeSet; public CanonicalXmlElement(string prefix, string localName, string namespaceURI, XmlDocument doc, bool defaultNodeSetInclusionState) : base(prefix, localName, namespaceURI, doc) { _isInNodeSet = defaultNodeSetInclusionState; } public bool IsInNodeSet { get { return _isInNodeSet; } set { _isInNodeSet = value; } } public void Write(StringBuilder strBuilder, DocPosition docPos, AncestralNamespaceContextManager anc) { Hashtable nsLocallyDeclared = new Hashtable(); SortedList nsListToRender = new SortedList(new NamespaceSortOrder()); SortedList attrListToRender = new SortedList(new AttributeSortOrder()); XmlAttributeCollection attrList = Attributes; if (attrList != null) { foreach (XmlAttribute attr in attrList) { if (((CanonicalXmlAttribute)attr).IsInNodeSet || Utils.IsNamespaceNode(attr) || Utils.IsXmlNamespaceNode(attr)) { if (Utils.IsNamespaceNode(attr)) { anc.TrackNamespaceNode(attr, nsListToRender, nsLocallyDeclared); } else if (Utils.IsXmlNamespaceNode(attr)) { anc.TrackXmlNamespaceNode(attr, nsListToRender, attrListToRender, nsLocallyDeclared); } else if (IsInNodeSet) { attrListToRender.Add(attr, null); } } } } if (!Utils.IsCommittedNamespace(this, Prefix, NamespaceURI)) { string name = ((Prefix.Length > 0) ? "xmlns" + ":" + Prefix : "xmlns"); XmlAttribute nsattrib = (XmlAttribute)OwnerDocument.CreateAttribute(name); nsattrib.Value = NamespaceURI; anc.TrackNamespaceNode(nsattrib, nsListToRender, nsLocallyDeclared); } if (IsInNodeSet) { anc.GetNamespacesToRender(this, attrListToRender, nsListToRender, nsLocallyDeclared); strBuilder.Append('<').Append(Name); foreach (object attr in nsListToRender.GetKeyList()) { (attr as CanonicalXmlAttribute).Write(strBuilder, docPos, anc); } foreach (object attr in attrListToRender.GetKeyList()) { (attr as CanonicalXmlAttribute).Write(strBuilder, docPos, anc); } strBuilder.Append('>'); } anc.EnterElementContext(); anc.LoadUnrenderedNamespaces(nsLocallyDeclared); anc.LoadRenderedNamespaces(nsListToRender); XmlNodeList childNodes = ChildNodes; foreach (XmlNode childNode in childNodes) { CanonicalizationDispatcher.Write(childNode, strBuilder, docPos, anc); } anc.ExitElementContext(); if (IsInNodeSet) { strBuilder.Append($"</{Name}>"); } } public void WriteHash(HashAlgorithm hash, DocPosition docPos, AncestralNamespaceContextManager anc) { Hashtable nsLocallyDeclared = new Hashtable(); SortedList nsListToRender = new SortedList(new NamespaceSortOrder()); SortedList attrListToRender = new SortedList(new AttributeSortOrder()); UTF8Encoding utf8 = new UTF8Encoding(false); byte[] rgbData; XmlAttributeCollection attrList = Attributes; if (attrList != null) { foreach (XmlAttribute attr in attrList) { if (((CanonicalXmlAttribute)attr).IsInNodeSet || Utils.IsNamespaceNode(attr) || Utils.IsXmlNamespaceNode(attr)) { if (Utils.IsNamespaceNode(attr)) { anc.TrackNamespaceNode(attr, nsListToRender, nsLocallyDeclared); } else if (Utils.IsXmlNamespaceNode(attr)) { anc.TrackXmlNamespaceNode(attr, nsListToRender, attrListToRender, nsLocallyDeclared); } else if (IsInNodeSet) { attrListToRender.Add(attr, null); } } } } if 
(!Utils.IsCommittedNamespace(this, Prefix, NamespaceURI)) { string name = ((Prefix.Length > 0) ? "xmlns" + ":" + Prefix : "xmlns"); XmlAttribute nsattrib = (XmlAttribute)OwnerDocument.CreateAttribute(name); nsattrib.Value = NamespaceURI; anc.TrackNamespaceNode(nsattrib, nsListToRender, nsLocallyDeclared); } if (IsInNodeSet) { anc.GetNamespacesToRender(this, attrListToRender, nsListToRender, nsLocallyDeclared); rgbData = utf8.GetBytes("<" + Name); hash.TransformBlock(rgbData, 0, rgbData.Length, rgbData, 0); foreach (object attr in nsListToRender.GetKeyList()) { (attr as CanonicalXmlAttribute).WriteHash(hash, docPos, anc); } foreach (object attr in attrListToRender.GetKeyList()) { (attr as CanonicalXmlAttribute).WriteHash(hash, docPos, anc); } rgbData = utf8.GetBytes(">"); hash.TransformBlock(rgbData, 0, rgbData.Length, rgbData, 0); } anc.EnterElementContext(); anc.LoadUnrenderedNamespaces(nsLocallyDeclared); anc.LoadRenderedNamespaces(nsListToRender); XmlNodeList childNodes = ChildNodes; foreach (XmlNode childNode in childNodes) { CanonicalizationDispatcher.WriteHash(childNode, hash, docPos, anc); } anc.ExitElementContext(); if (IsInNodeSet) { rgbData = utf8.GetBytes("</" + Name + ">"); hash.TransformBlock(rgbData, 0, rgbData.Length, rgbData, 0); } } } }
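CanonicalXmlElement is internal plumbing; the same canonical (C14N) output is reachable through the public XmlDsigC14NTransform API in the System.Security.Cryptography.Xml package. A hedged usage sketch, with an arbitrary illustrative XML snippet as input:

using System;
using System.IO;
using System.Security.Cryptography.Xml;
using System.Xml;

public static class C14NSketch
{
    public static void Main()
    {
        var doc = new XmlDocument { PreserveWhitespace = true };
        doc.LoadXml("<root xmlns:a='urn:a'><a:child attr='1'   /></root>");

        var transform = new XmlDsigC14NTransform();
        transform.LoadInput(doc);

        // The transform drives the internal CanonicalXml* node types and yields the C14N bytes.
        using var canonical = (Stream)transform.GetOutput(typeof(Stream));
        Console.WriteLine(new StreamReader(canonical).ReadToEnd());
    }
}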
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
./src/coreclr/pal/inc/rt/cpp/stdint.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "palrt.h"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "palrt.h"
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
./src/libraries/Microsoft.Extensions.DependencyModel/src/DependencyContextPaths.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Linq; namespace Microsoft.Extensions.DependencyModel { internal sealed class DependencyContextPaths { private const string DepsFilesProperty = "APP_CONTEXT_DEPS_FILES"; private const string FxDepsFileProperty = "FX_DEPS_FILE"; public static DependencyContextPaths Current { get; } = GetCurrent(); public string? Application { get; } public string? SharedRuntime { get; } public IEnumerable<string> NonApplicationPaths { get; } public DependencyContextPaths( string? application, string? sharedRuntime, IEnumerable<string>? nonApplicationPaths) { Application = application; SharedRuntime = sharedRuntime; NonApplicationPaths = nonApplicationPaths ?? Enumerable.Empty<string>(); } private static DependencyContextPaths GetCurrent() { object? deps = AppDomain.CurrentDomain.GetData(DepsFilesProperty); object? fxDeps = AppDomain.CurrentDomain.GetData(FxDepsFileProperty); return Create(deps as string, fxDeps as string); } internal static DependencyContextPaths Create(string? depsFiles, string? sharedRuntime) { string[]? files = depsFiles?.Split(new[] { ';' }, StringSplitOptions.RemoveEmptyEntries); string? application = files != null && files.Length > 0 ? files[0] : null; string[]? nonApplicationPaths = files? .Skip(1) // the application path .ToArray(); return new DependencyContextPaths( application, sharedRuntime, nonApplicationPaths); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.Linq; namespace Microsoft.Extensions.DependencyModel { internal sealed class DependencyContextPaths { private const string DepsFilesProperty = "APP_CONTEXT_DEPS_FILES"; private const string FxDepsFileProperty = "FX_DEPS_FILE"; public static DependencyContextPaths Current { get; } = GetCurrent(); public string? Application { get; } public string? SharedRuntime { get; } public IEnumerable<string> NonApplicationPaths { get; } public DependencyContextPaths( string? application, string? sharedRuntime, IEnumerable<string>? nonApplicationPaths) { Application = application; SharedRuntime = sharedRuntime; NonApplicationPaths = nonApplicationPaths ?? Enumerable.Empty<string>(); } private static DependencyContextPaths GetCurrent() { object? deps = AppDomain.CurrentDomain.GetData(DepsFilesProperty); object? fxDeps = AppDomain.CurrentDomain.GetData(FxDepsFileProperty); return Create(deps as string, fxDeps as string); } internal static DependencyContextPaths Create(string? depsFiles, string? sharedRuntime) { string[]? files = depsFiles?.Split(new[] { ';' }, StringSplitOptions.RemoveEmptyEntries); string? application = files != null && files.Length > 0 ? files[0] : null; string[]? nonApplicationPaths = files? .Skip(1) // the application path .ToArray(); return new DependencyContextPaths( application, sharedRuntime, nonApplicationPaths); } } }
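DependencyContextPaths.Create treats APP_CONTEXT_DEPS_FILES as a semicolon-separated list whose first entry is the application's .deps.json and whose remaining entries are other (framework) deps files. A standalone sketch mirroring that parsing; the sample value is made up for illustration:

using System;
using System.Linq;

public static class DepsPathsSketch
{
    public static void Main()
    {
        // Illustrative value only; at runtime this comes from the "APP_CONTEXT_DEPS_FILES" AppDomain data.
        string depsFiles = "/app/MyApp.deps.json;/shared/Microsoft.NETCore.App/6.0.0/Microsoft.NETCore.App.deps.json";

        string[] files = depsFiles.Split(new[] { ';' }, StringSplitOptions.RemoveEmptyEntries);
        string application = files.Length > 0 ? files[0] : null;
        string[] nonApplicationPaths = files.Skip(1).ToArray(); // everything after the application path

        Console.WriteLine($"Application deps: {application}");
        Console.WriteLine($"Other deps files: {string.Join(", ", nonApplicationPaths)}");
    }
}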
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us removing a bunch of code that is unused, e.g. the dependency on libiconv.
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest470/Generated470.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 } .assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) } //TYPES IN FORWARDER ASSEMBLIES: //TEST ASSEMBLY: .assembly Generated470 { .hash algorithm 0x00008004 } .assembly extern xunit.core {} .class public BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class public BaseClass1 extends BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void BaseClass0::.ctor() ret } } .class public sequential sealed MyStruct520`2<T0, T1> extends [mscorlib]System.ValueType implements class IBase2`2<class BaseClass1,!T0>, class IBase2`2<!T0,class BaseClass1> { .pack 0 .size 1 .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "MyStruct520::Method7.4010<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot instance string ClassMethod1028() cil managed noinlining { ldstr "MyStruct520::ClassMethod1028.4012()" ret } .method public hidebysig newslot instance string ClassMethod1029() cil managed noinlining { ldstr "MyStruct520::ClassMethod1029.4013()" ret } .method public hidebysig newslot instance string ClassMethod1030<M0>() cil managed noinlining { ldstr "MyStruct520::ClassMethod1030.4014<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance bool Equals(object obj) cil managed { ldc.i4.0 ret } .method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret } .method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated470 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr 
"M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct520.T.T<T0,T1,(valuetype MyStruct520`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct520.T.T<T0,T1,(valuetype MyStruct520`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct520`2<!!T0,!!T1> callvirt instance string class IBase2`2<class BaseClass1,!!T0>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct520`2<!!T0,!!T1> callvirt instance string class IBase2`2<!!T0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct520.A.T<T1,(valuetype MyStruct520`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct520.A.T<T1,(valuetype MyStruct520`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct520`2<class BaseClass0,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct520`2<class BaseClass0,!!T1> callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct520.A.A<(valuetype MyStruct520`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct520.A.A<(valuetype MyStruct520`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct520`2<class BaseClass0,class BaseClass0> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
valuetype MyStruct520`2<class BaseClass0,class BaseClass0> callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct520.A.B<(valuetype MyStruct520`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct520.A.B<(valuetype MyStruct520`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct520`2<class BaseClass0,class BaseClass1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct520`2<class BaseClass0,class BaseClass1> callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass0> V_1) ldloca V_1 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloca V_1 dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ClassMethod1028() ldstr "MyStruct520::ClassMethod1028.4012()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ClassMethod1029() ldstr "MyStruct520::ClassMethod1029.4013()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ClassMethod1030<object>() ldstr "MyStruct520::ClassMethod1030.4014<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup ldnull call instance bool valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::Equals(object) pop dup call instance int32 valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::GetHashCode() pop dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ToString() pop pop ldloc V_1 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call 
void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_1 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_1 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_1 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass1> V_2) ldloca V_2 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloca V_2 dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ClassMethod1028() ldstr "MyStruct520::ClassMethod1028.4012()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ClassMethod1029() ldstr "MyStruct520::ClassMethod1029.4013()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ClassMethod1030<object>() ldstr "MyStruct520::ClassMethod1030.4014<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup ldnull call instance bool valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::Equals(object) pop dup call instance int32 valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::GetHashCode() pop dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ToString() pop pop ldloc V_2 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_2 
box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_2 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_2 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass0> V_5) ldloca V_5 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass0> .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV0 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.T<class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV1 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.A<valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV2 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV3 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.T<class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV4 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.B<valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV5 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5: .try 
{ ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV6 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.T<class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV7 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.A<valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV8 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV9 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.T<class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV10 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.B<valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV11 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11: .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass1> V_6) ldloca V_6 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass1> .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV12 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV12} LV12: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.T<class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV13 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV13} LV13: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.A<valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV14 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV14} LV14: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV15 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV15} LV15: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.T<class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV16 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV16} LV16: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.B<valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV17 } catch 
[mscorlib]System.Security.VerificationException { pop leave.s LV17} LV17: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV18 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV18} LV18: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.T<class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV19 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV19} LV19: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.A<valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV20 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV20} LV20: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV21 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV21} LV21: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.T<class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV22 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV22} LV22: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.B<valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV23 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV23} LV23: ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass0> V_9) ldloca V_9 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass0> .try { ldloc V_9 ldstr "MyStruct520::Method7.4010<System.Object>()#" + "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.MyStruct520.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV0 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0: .try { ldloc V_9 ldstr "MyStruct520::Method7.4010<System.Object>()#" + "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.MyStruct520.A.T<class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV1 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1: .try { ldloc V_9 ldstr "MyStruct520::Method7.4010<System.Object>()#" + "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.MyStruct520.A.A<valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV2 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2: .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass1> V_10) ldloca V_10 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass1> .try { ldloc 
V_10 ldstr "MyStruct520::Method7.4010<System.Object>()#" + "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.MyStruct520.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV3 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3: .try { ldloc V_10 ldstr "MyStruct520::Method7.4010<System.Object>()#" + "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.MyStruct520.A.T<class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV4 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4: .try { ldloc V_10 ldstr "MyStruct520::Method7.4010<System.Object>()#" + "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.MyStruct520.A.B<valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV5 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5: ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass0> V_13) ldloca V_13 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ClassMethod1028() calli default string(object) ldstr "MyStruct520::ClassMethod1028.4012()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ClassMethod1029() calli default string(object) ldstr "MyStruct520::ClassMethod1029.4013()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ClassMethod1030<object>() calli default string(object) ldstr "MyStruct520::ClassMethod1030.4014<System.Object>()" ldstr "valuetype MyStruct520`2<class 
BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldnull ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance bool valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::Equals(object) calli default bool(object,object) pop ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance int32 valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::GetHashCode() calli default int32(object) pop ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ToString() calli default string(object) pop ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass1> V_14) ldloca V_14 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default 
string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ClassMethod1028() calli default string(object) ldstr "MyStruct520::ClassMethod1028.4012()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ClassMethod1029() calli default string(object) ldstr "MyStruct520::ClassMethod1029.4013()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ClassMethod1030<object>() calli default string(object) ldstr "MyStruct520::ClassMethod1030.4014<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldnull ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance bool valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::Equals(object) calli default bool(object,object) pop ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance int32 valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::GetHashCode() calli default int32(object) pop ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ToString() calli default string(object) pop ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) 
ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated470::MethodCallingTest() call void Generated470::ConstrainedCallsTest() call void Generated470::StructConstrainedInterfaceCallsTest() call void Generated470::CalliTest() ldc.i4 100 ret } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 } .assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) } //TYPES IN FORWARDER ASSEMBLIES: //TEST ASSEMBLY: .assembly Generated470 { .hash algorithm 0x00008004 } .assembly extern xunit.core {} .class public BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void [mscorlib]System.Object::.ctor() ret } } .class public BaseClass1 extends BaseClass0 { .method public hidebysig specialname rtspecialname instance void .ctor() cil managed { ldarg.0 call instance void BaseClass0::.ctor() ret } } .class public sequential sealed MyStruct520`2<T0, T1> extends [mscorlib]System.ValueType implements class IBase2`2<class BaseClass1,!T0>, class IBase2`2<!T0,class BaseClass1> { .pack 0 .size 1 .method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining { ldstr "MyStruct520::Method7.4010<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig newslot instance string ClassMethod1028() cil managed noinlining { ldstr "MyStruct520::ClassMethod1028.4012()" ret } .method public hidebysig newslot instance string ClassMethod1029() cil managed noinlining { ldstr "MyStruct520::ClassMethod1029.4013()" ret } .method public hidebysig newslot instance string ClassMethod1030<M0>() cil managed noinlining { ldstr "MyStruct520::ClassMethod1030.4014<" ldtoken !!M0 call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle) call string [mscorlib]System.String::Concat(object,object) ldstr ">()" call string [mscorlib]System.String::Concat(object,object) ret } .method public hidebysig virtual instance bool Equals(object obj) cil managed { ldc.i4.0 ret } .method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret } .method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret } } .class interface public abstract IBase2`2<+T0, -T1> { .method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { } } .class public auto ansi beforefieldinit Generated470 { .method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed { .maxstack 5 .locals init (string[] actualResults) ldc.i4.s 0 newarr string stloc.s actualResults ldarg.1 ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)" ldc.i4.s 0 ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr 
"M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. 
!!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 6 .locals init (string[] actualResults) ldc.i4.s 1 newarr string stloc.s actualResults ldarg.1 ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 1 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. !!W callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct520.T.T<T0,T1,(valuetype MyStruct520`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct520.T.T<T0,T1,(valuetype MyStruct520`2<!!T0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct520`2<!!T0,!!T1> callvirt instance string class IBase2`2<class BaseClass1,!!T0>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct520`2<!!T0,!!T1> callvirt instance string class IBase2`2<!!T0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct520.A.T<T1,(valuetype MyStruct520`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct520.A.T<T1,(valuetype MyStruct520`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct520`2<class BaseClass0,!!T1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct520`2<class BaseClass0,!!T1> callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct520.A.A<(valuetype MyStruct520`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct520.A.A<(valuetype MyStruct520`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct520`2<class BaseClass0,class BaseClass0> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. 
valuetype MyStruct520`2<class BaseClass0,class BaseClass0> callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method static void M.MyStruct520.A.B<(valuetype MyStruct520`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed { .maxstack 7 .locals init (string[] actualResults) ldc.i4.s 2 newarr string stloc.s actualResults ldarg.1 ldstr "M.MyStruct520.A.B<(valuetype MyStruct520`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)" ldc.i4.s 2 ldloc.s actualResults ldc.i4.s 0 ldarga.s 0 constrained. valuetype MyStruct520`2<class BaseClass0,class BaseClass1> callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() stelem.ref ldloc.s actualResults ldc.i4.s 1 ldarga.s 0 constrained. valuetype MyStruct520`2<class BaseClass0,class BaseClass1> callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() stelem.ref ldloc.s actualResults call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[]) ret } .method public hidebysig static void MethodCallingTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calling Test ==========================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass0> V_1) ldloca V_1 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloca V_1 dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ClassMethod1028() ldstr "MyStruct520::ClassMethod1028.4012()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ClassMethod1029() ldstr "MyStruct520::ClassMethod1029.4013()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ClassMethod1030<object>() ldstr "MyStruct520::ClassMethod1030.4014<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup ldnull call instance bool valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::Equals(object) pop dup call instance int32 valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::GetHashCode() pop dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ToString() pop pop ldloc V_1 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call 
void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_1 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_1 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_1 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass1> V_2) ldloca V_2 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloca V_2 dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ClassMethod1028() ldstr "MyStruct520::ClassMethod1028.4012()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ClassMethod1029() ldstr "MyStruct520::ClassMethod1029.4013()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ClassMethod1030<object>() ldstr "MyStruct520::ClassMethod1030.4014<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type MyStruct520" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) dup ldnull call instance bool valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::Equals(object) pop dup call instance int32 valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::GetHashCode() pop dup call instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ToString() pop pop ldloc V_2 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_2 
box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_2 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldloc V_2 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> dup callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) pop ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void ConstrainedCallsTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Constrained Calls Test ==========================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass0> V_5) ldloca V_5 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass0> .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV0 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.T<class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV1 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.A<valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV2 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV3 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.T<class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV4 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.B<valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV5 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5: .try 
{ ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV6 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.T<class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV7 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.A<valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV8 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV9 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.T<class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV10 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10: .try { ldloc V_5 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.B<valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV11 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11: .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass1> V_6) ldloca V_6 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass1> .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV12 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV12} LV12: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.T<class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV13 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV13} LV13: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.A<valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV14 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV14} LV14: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV15 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV15} LV15: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.T<class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV16 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV16} LV16: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.B<valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV17 } catch 
[mscorlib]System.Security.VerificationException { pop leave.s LV17} LV17: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV18 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV18} LV18: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.T<class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV19 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV19} LV19: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.A.A<valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV20 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV20} LV20: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV21 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV21} LV21: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.T<class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV22 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV22} LV22: .try { ldloc V_6 ldstr "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.IBase2.B.B<valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV23 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV23} LV23: ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed { .maxstack 10 ldstr "===================== Struct Constrained Interface Calls Test =====================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass0> V_9) ldloca V_9 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass0> .try { ldloc V_9 ldstr "MyStruct520::Method7.4010<System.Object>()#" + "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.MyStruct520.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!2,string) leave.s LV0 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0: .try { ldloc V_9 ldstr "MyStruct520::Method7.4010<System.Object>()#" + "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.MyStruct520.A.T<class BaseClass0,valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!1,string) leave.s LV1 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1: .try { ldloc V_9 ldstr "MyStruct520::Method7.4010<System.Object>()#" + "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.MyStruct520.A.A<valuetype MyStruct520`2<class BaseClass0,class BaseClass0>>(!!0,string) leave.s LV2 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2: .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass1> V_10) ldloca V_10 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass1> .try { ldloc 
V_10 ldstr "MyStruct520::Method7.4010<System.Object>()#" + "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.MyStruct520.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!2,string) leave.s LV3 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3: .try { ldloc V_10 ldstr "MyStruct520::Method7.4010<System.Object>()#" + "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.MyStruct520.A.T<class BaseClass1,valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!1,string) leave.s LV4 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4: .try { ldloc V_10 ldstr "MyStruct520::Method7.4010<System.Object>()#" + "MyStruct520::Method7.4010<System.Object>()#" call void Generated470::M.MyStruct520.A.B<valuetype MyStruct520`2<class BaseClass0,class BaseClass1>>(!!0,string) leave.s LV5 } catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5: ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static void CalliTest() cil managed { .maxstack 10 .locals init (object V_0) ldstr "========================== Method Calli Test ==========================" call void [mscorlib]System.Console::WriteLine(string) .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass0> V_13) ldloca V_13 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ClassMethod1028() calli default string(object) ldstr "MyStruct520::ClassMethod1028.4012()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ClassMethod1029() calli default string(object) ldstr "MyStruct520::ClassMethod1029.4013()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ClassMethod1030<object>() calli default string(object) ldstr "MyStruct520::ClassMethod1030.4014<System.Object>()" ldstr "valuetype MyStruct520`2<class 
BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldnull ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance bool valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::Equals(object) calli default bool(object,object) pop ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance int32 valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::GetHashCode() calli default int32(object) pop ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass0>::ToString() calli default string(object) pop ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldloc V_13 box valuetype MyStruct520`2<class BaseClass0,class BaseClass0> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass0>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) .locals init (valuetype MyStruct520`2<class BaseClass0,class BaseClass1> V_14) ldloca V_14 initobj valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default 
string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ClassMethod1028() calli default string(object) ldstr "MyStruct520::ClassMethod1028.4012()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ClassMethod1029() calli default string(object) ldstr "MyStruct520::ClassMethod1029.4013()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ClassMethod1030<object>() calli default string(object) ldstr "MyStruct520::ClassMethod1030.4014<System.Object>()" ldstr "valuetype MyStruct520`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldnull ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance bool valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::Equals(object) calli default bool(object,object) pop ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance int32 valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::GetHashCode() calli default int32(object) pop ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string valuetype MyStruct520`2<class BaseClass0,class BaseClass1>::ToString() calli default string(object) pop ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>() calli default string(object) 
ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldloc V_14 box valuetype MyStruct520`2<class BaseClass0,class BaseClass1> ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>() calli default string(object) ldstr "MyStruct520::Method7.4010<System.Object>()" ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct520`2<class BaseClass0,class BaseClass1>" call void [TestFramework]TestFramework::MethodCallTest(string,string,string) ldstr "========================================================================\n\n" call void [mscorlib]System.Console::WriteLine(string) ret } .method public hidebysig static int32 Main() cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint .maxstack 10 call void Generated470::MethodCallingTest() call void Generated470::ConstrainedCallsTest() call void Generated470::StructConstrainedInterfaceCallsTest() call void Generated470::CalliTest() ldc.i4 100 ret } }
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
./src/libraries/Common/src/Interop/Windows/User32/Interop.LoadString.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class User32 { [GeneratedDllImport(Libraries.User32, EntryPoint = "LoadStringW", SetLastError = true)] internal static unsafe partial int LoadString(IntPtr hInstance, uint uID, char* lpBuffer, int cchBufferMax); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class User32 { [GeneratedDllImport(Libraries.User32, EntryPoint = "LoadStringW", SetLastError = true)] internal static unsafe partial int LoadString(IntPtr hInstance, uint uID, char* lpBuffer, int cchBufferMax); } }
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
./src/libraries/System.Runtime.InteropServices/tests/System.Runtime.InteropServices.UnitTests/System/Runtime/InteropServices/Marshal/GetEndComSlotTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Reflection; using System.Reflection.Emit; using System.Runtime.InteropServices.Tests.Common; using Xunit; namespace System.Runtime.InteropServices.Tests { public partial class GetEndComSlotTests { [Fact] [PlatformSpecific(TestPlatforms.AnyUnix)] public void GetEndComSlot_Unix_ThrowsPlatformNotSupportedException() { Assert.Throws<PlatformNotSupportedException>(() => Marshal.GetEndComSlot(null)); } [Fact] [PlatformSpecific(TestPlatforms.Windows)] public void GetEndComSlot_NullType_ThrowsArgumentNullException() { AssertExtensions.Throws<ArgumentNullException>(null, () => Marshal.GetEndComSlot(null)); } [Fact] [PlatformSpecific(TestPlatforms.Windows)] public void GetEndComSlot_NotRuntimeType_ThrowsArgumentException() { AssemblyBuilder assemblyBuilder = AssemblyBuilder.DefineDynamicAssembly(new AssemblyName("Assembly"), AssemblyBuilderAccess.Run); ModuleBuilder moduleBuilder = assemblyBuilder.DefineDynamicModule("Module"); TypeBuilder typeBuilder = moduleBuilder.DefineType("Type"); AssertExtensions.Throws<ArgumentException>("t", () => Marshal.GetEndComSlot(typeBuilder)); } public static IEnumerable<object[]> GetStartComSlot_InvalidGenericType_TestData() { yield return new object[] { typeof(int).MakeByRefType() }; yield return new object[] { typeof(GenericClass<>).GetTypeInfo().GenericTypeParameters[0] }; } [Theory] [MemberData(nameof(GetStartComSlot_InvalidGenericType_TestData))] [PlatformSpecific(TestPlatforms.Windows)] public void GetEndComSlot_InvalidGenericType_ThrowsArgumentNullException(Type type) { AssertExtensions.Throws<ArgumentNullException>(null, () => Marshal.GetEndComSlot(type)); } public static IEnumerable<object[]> GetStartComSlot_NotComVisibleType_TestData() { yield return new object[] { typeof(GenericClass<>) }; yield return new object[] { typeof(GenericClass<string>) }; yield return new object[] { typeof(GenericStruct<>) }; yield return new object[] { typeof(GenericStruct<string>) }; yield return new object[] { typeof(IGenericInterface<>) }; yield return new object[] { typeof(IGenericInterface<string>) }; yield return new object[] { typeof(NonComVisibleClass) }; yield return new object[] { typeof(NonComVisibleStruct) }; yield return new object[] { typeof(INonComVisibleInterface) }; yield return new object[] { typeof(int[]) }; yield return new object[] { typeof(int[][]) }; yield return new object[] { typeof(int[,]) }; AssemblyBuilder assemblyBuilder = AssemblyBuilder.DefineDynamicAssembly(new AssemblyName("Assembly"), AssemblyBuilderAccess.RunAndCollect); ModuleBuilder moduleBuilder = assemblyBuilder.DefineDynamicModule("Module"); TypeBuilder typeBuilder = moduleBuilder.DefineType("Type"); Type collectibleType = typeBuilder.CreateType(); yield return new object[] { collectibleType }; } [Theory] [MemberData(nameof(GetStartComSlot_NotComVisibleType_TestData))] [PlatformSpecific(TestPlatforms.Windows)] public void GetEndComSlot_NotComVisibleType_ThrowsArgumentException(Type type) { AssertExtensions.Throws<ArgumentException>("t", () => Marshal.GetEndComSlot(type)); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Reflection; using System.Reflection.Emit; using System.Runtime.InteropServices.Tests.Common; using Xunit; namespace System.Runtime.InteropServices.Tests { public partial class GetEndComSlotTests { [Fact] [PlatformSpecific(TestPlatforms.AnyUnix)] public void GetEndComSlot_Unix_ThrowsPlatformNotSupportedException() { Assert.Throws<PlatformNotSupportedException>(() => Marshal.GetEndComSlot(null)); } [Fact] [PlatformSpecific(TestPlatforms.Windows)] public void GetEndComSlot_NullType_ThrowsArgumentNullException() { AssertExtensions.Throws<ArgumentNullException>(null, () => Marshal.GetEndComSlot(null)); } [Fact] [PlatformSpecific(TestPlatforms.Windows)] public void GetEndComSlot_NotRuntimeType_ThrowsArgumentException() { AssemblyBuilder assemblyBuilder = AssemblyBuilder.DefineDynamicAssembly(new AssemblyName("Assembly"), AssemblyBuilderAccess.Run); ModuleBuilder moduleBuilder = assemblyBuilder.DefineDynamicModule("Module"); TypeBuilder typeBuilder = moduleBuilder.DefineType("Type"); AssertExtensions.Throws<ArgumentException>("t", () => Marshal.GetEndComSlot(typeBuilder)); } public static IEnumerable<object[]> GetStartComSlot_InvalidGenericType_TestData() { yield return new object[] { typeof(int).MakeByRefType() }; yield return new object[] { typeof(GenericClass<>).GetTypeInfo().GenericTypeParameters[0] }; } [Theory] [MemberData(nameof(GetStartComSlot_InvalidGenericType_TestData))] [PlatformSpecific(TestPlatforms.Windows)] public void GetEndComSlot_InvalidGenericType_ThrowsArgumentNullException(Type type) { AssertExtensions.Throws<ArgumentNullException>(null, () => Marshal.GetEndComSlot(type)); } public static IEnumerable<object[]> GetStartComSlot_NotComVisibleType_TestData() { yield return new object[] { typeof(GenericClass<>) }; yield return new object[] { typeof(GenericClass<string>) }; yield return new object[] { typeof(GenericStruct<>) }; yield return new object[] { typeof(GenericStruct<string>) }; yield return new object[] { typeof(IGenericInterface<>) }; yield return new object[] { typeof(IGenericInterface<string>) }; yield return new object[] { typeof(NonComVisibleClass) }; yield return new object[] { typeof(NonComVisibleStruct) }; yield return new object[] { typeof(INonComVisibleInterface) }; yield return new object[] { typeof(int[]) }; yield return new object[] { typeof(int[][]) }; yield return new object[] { typeof(int[,]) }; AssemblyBuilder assemblyBuilder = AssemblyBuilder.DefineDynamicAssembly(new AssemblyName("Assembly"), AssemblyBuilderAccess.RunAndCollect); ModuleBuilder moduleBuilder = assemblyBuilder.DefineDynamicModule("Module"); TypeBuilder typeBuilder = moduleBuilder.DefineType("Type"); Type collectibleType = typeBuilder.CreateType(); yield return new object[] { collectibleType }; } [Theory] [MemberData(nameof(GetStartComSlot_NotComVisibleType_TestData))] [PlatformSpecific(TestPlatforms.Windows)] public void GetEndComSlot_NotComVisibleType_ThrowsArgumentException(Type type) { AssertExtensions.Throws<ArgumentException>("t", () => Marshal.GetEndComSlot(type)); } } }
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
./src/tests/JIT/jit64/eh/FinallyExec/nonlocalexitinhandler.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="nonlocalexitinhandler.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\..\..\common\eh_common.csproj" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="nonlocalexitinhandler.il" /> </ItemGroup> <ItemGroup> <ProjectReference Include="..\..\..\common\eh_common.csproj" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
./src/tests/JIT/Methodical/eh/nested/cascadedcatchret/throwincascadedexcept.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly extern mscorlib {} .assembly extern eh_common {} .assembly 'throwincascadedexcept' {} .class public auto ansi Test_throwincascadedexcept extends [mscorlib] System.Object { .method public static int32 main() { .entrypoint .maxstack 2 .locals init ( int32 V_0, int32 V1, class [mscorlib]System.IO.StringWriter expectedOut, class [eh_common]TestUtil.TestLog testLog ) newobj instance void [mscorlib]System.IO.StringWriter::.ctor() stloc.s expectedOut ldloc.s expectedOut ldstr "begin" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 0" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 1" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 2" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 3" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 4" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 5" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 6" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 7" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 8" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 9" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 0, ldloc.0 is 10" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 50" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 0" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 1, ldloc.0 is 11" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 49" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 1" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 2, ldloc.0 is 12" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 48" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 2" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 3, ldloc.0 is 13" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 47" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 3" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 4, ldloc.0 is 14" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 46" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s 
expectedOut ldstr "In except 4" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 5, ldloc.0 is 15" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 45" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 5" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 6, ldloc.0 is 16" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 44" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 6" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 7, ldloc.0 is 17" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 43" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 7" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 8, ldloc.0 is 18" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 42" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 8" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 9, ldloc.0 is 19" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 41" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 9" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 10, ldloc.0 is 20" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 40" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 10" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 11, ldloc.0 is 21" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 39" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 11" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 12, ldloc.0 is 22" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 38" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 12" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 13, ldloc.0 is 23" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 37" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 13" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 14, ldloc.0 is 24" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 36" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In 
except 14" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 15, ldloc.0 is 25" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 35" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 15" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 16, ldloc.0 is 26" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 34" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 16" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 17, ldloc.0 is 27" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 33" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 17" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 18, ldloc.0 is 28" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 32" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 18" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 19, ldloc.0 is 29" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 31" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 19" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 20, ldloc.0 is 30" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 30" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 20" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 21, ldloc.0 is 31" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 29" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 21" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 22, ldloc.0 is 32" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 28" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 22" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 23, ldloc.0 is 33" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 27" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 23" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 24, ldloc.0 is 34" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 26" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 24" 
callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 25, ldloc.0 is 35" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 25" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 25" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 26, ldloc.0 is 36" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 24" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 26" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 27, ldloc.0 is 37" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 23" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 27" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 28, ldloc.0 is 38" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 22" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 28" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 29, ldloc.0 is 39" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 21" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 29" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 30, ldloc.0 is 40" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 20" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 30" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 31, ldloc.0 is 41" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 19" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 31" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 32, ldloc.0 is 42" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 18" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 32" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 33, ldloc.0 is 43" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 17" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 33" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 34, ldloc.0 is 44" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 16" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 34" callvirt 
instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 35, ldloc.0 is 45" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 15" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 35" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 36, ldloc.0 is 46" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 14" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 36" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 37, ldloc.0 is 47" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 13" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 37" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 38, ldloc.0 is 48" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 12" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 38" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In try 39, ldloc.0 is 49" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In filter 11" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In except 39" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In finally 9" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In finally 8" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In finally 7" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In finally 6" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In finally 5" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In finally 4" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In finally 3" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In finally 2" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In finally 1" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "In finally 0" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut ldstr "begin" callvirt instance void [mscorlib]System.IO.TextWriter::WriteLine(string) ldloc.s expectedOut newobj instance void [eh_common]TestUtil.TestLog::.ctor(object) stloc.s testLog ldloc.s testLog callvirt instance void [eh_common]TestUtil.TestLog::StartRecording() ldc.i4 0 stloc.0 begin: ldstr "begin" call void [System.Console]System.Console::WriteLine(string) ldloc.0 brtrue done .try { ldstr "In try 0" call void [System.Console]System.Console::WriteLine(string) ldloc.0 ldc.i4.1 add stloc.0 .try { ldstr "In try 1" call void 
[System.Console]System.Console::WriteLine(string) ldloc.0 ldc.i4.1 add stloc.0 .try { ldstr "In try 2" call void [System.Console]System.Console::WriteLine(string) ldloc.0 ldc.i4.1 add stloc.0 .try { ldstr "In try 3" call void [System.Console]System.Console::WriteLine(string) ldloc.0 ldc.i4.1 add stloc.0 .try { ldstr "In try 4" call void [System.Console]System.Console::WriteLine(string) ldloc.0 ldc.i4.1 add stloc.0 .try { ldstr "In try 5" call void [System.Console]System.Console::WriteLine(string) ldloc.0 ldc.i4.1 add stloc.0 .try { ldstr "In try 6" call void [System.Console]System.Console::WriteLine(string) ldloc.0 ldc.i4.1 add stloc.0 .try { ldstr "In try 7" call void [System.Console]System.Console::WriteLine(string) ldloc.0 ldc.i4.1 add stloc.0 .try { ldstr "In try 8" call void [System.Console]System.Console::WriteLine(string) ldloc.0 ldc.i4.1 add stloc.0 .try { ldstr "In try 9" call void [System.Console]System.Console::WriteLine(string) ldloc.0 ldc.i4.1 add stloc.0 label0: .try { ldstr "In try 0, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 50" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 0" call void [System.Console]System.Console::WriteLine(string) label1: .try { ldstr "In try 1, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 49" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 1" call void [System.Console]System.Console::WriteLine(string) label2: .try { ldstr "In try 2, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 48" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 2" call void [System.Console]System.Console::WriteLine(string) label3: .try { ldstr "In try 3, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 47" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 3" call void [System.Console]System.Console::WriteLine(string) label4: .try { ldstr "In try 4, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 46" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 4" call void [System.Console]System.Console::WriteLine(string) label5: .try { ldstr "In try 5, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr 
"In filter 45" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 5" call void [System.Console]System.Console::WriteLine(string) label6: .try { ldstr "In try 6, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 44" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 6" call void [System.Console]System.Console::WriteLine(string) label7: .try { ldstr "In try 7, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 43" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 7" call void [System.Console]System.Console::WriteLine(string) label8: .try { ldstr "In try 8, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 42" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 8" call void [System.Console]System.Console::WriteLine(string) label9: .try { ldstr "In try 9, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 41" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 9" call void [System.Console]System.Console::WriteLine(string) label10: .try { ldstr "In try 10, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 40" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 10" call void [System.Console]System.Console::WriteLine(string) label11: .try { ldstr "In try 11, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 39" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 11" call void [System.Console]System.Console::WriteLine(string) label12: .try { ldstr "In try 12, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 38" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 12" call void [System.Console]System.Console::WriteLine(string) label13: .try { ldstr "In try 13, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void 
[System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 37" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 13" call void [System.Console]System.Console::WriteLine(string) label14: .try { ldstr "In try 14, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 36" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 14" call void [System.Console]System.Console::WriteLine(string) label15: .try { ldstr "In try 15, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 35" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 15" call void [System.Console]System.Console::WriteLine(string) label16: .try { ldstr "In try 16, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 34" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 16" call void [System.Console]System.Console::WriteLine(string) label17: .try { ldstr "In try 17, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 33" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 17" call void [System.Console]System.Console::WriteLine(string) label18: .try { ldstr "In try 18, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 32" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 18" call void [System.Console]System.Console::WriteLine(string) label19: .try { ldstr "In try 19, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 31" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 19" call void [System.Console]System.Console::WriteLine(string) label20: .try { ldstr "In try 20, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 30" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 20" call void 
[System.Console]System.Console::WriteLine(string) label21: .try { ldstr "In try 21, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 29" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 21" call void [System.Console]System.Console::WriteLine(string) label22: .try { ldstr "In try 22, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 28" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 22" call void [System.Console]System.Console::WriteLine(string) label23: .try { ldstr "In try 23, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 27" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 23" call void [System.Console]System.Console::WriteLine(string) label24: .try { ldstr "In try 24, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 26" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 24" call void [System.Console]System.Console::WriteLine(string) label25: .try { ldstr "In try 25, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 25" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 25" call void [System.Console]System.Console::WriteLine(string) label26: .try { ldstr "In try 26, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 24" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 26" call void [System.Console]System.Console::WriteLine(string) label27: .try { ldstr "In try 27, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 23" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 27" call void [System.Console]System.Console::WriteLine(string) label28: .try { ldstr "In try 28, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { 
pop ldstr "In filter 22" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 28" call void [System.Console]System.Console::WriteLine(string) label29: .try { ldstr "In try 29, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 21" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 29" call void [System.Console]System.Console::WriteLine(string) label30: .try { ldstr "In try 30, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 20" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 30" call void [System.Console]System.Console::WriteLine(string) label31: .try { ldstr "In try 31, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 19" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 31" call void [System.Console]System.Console::WriteLine(string) label32: .try { ldstr "In try 32, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 18" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 32" call void [System.Console]System.Console::WriteLine(string) label33: .try { ldstr "In try 33, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 17" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 33" call void [System.Console]System.Console::WriteLine(string) label34: .try { ldstr "In try 34, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 16" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 34" call void [System.Console]System.Console::WriteLine(string) label35: .try { ldstr "In try 35, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 15" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 35" call void [System.Console]System.Console::WriteLine(string) label36: .try { ldstr "In try 36, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void 
[System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 14" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 36" call void [System.Console]System.Console::WriteLine(string) label37: .try { ldstr "In try 37, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 13" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 37" call void [System.Console]System.Console::WriteLine(string) label38: .try { ldstr "In try 38, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 12" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 38" call void [System.Console]System.Console::WriteLine(string) label39: .try { ldstr "In try 39, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 11" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 39" call void [System.Console]System.Console::WriteLine(string) ldloc.0 brfalse.s next0 ldloc.0 ldc.i4 1 beq.s next0a leave begin next0a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label38 next0: leave done } ldloc.0 brfalse.s next1 ldloc.0 ldc.i4 1 beq.s next1a leave begin next1a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label37 next1: leave done } ldloc.0 brfalse.s next2 ldloc.0 ldc.i4 1 beq.s next2a leave begin next2a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label36 next2: leave done } ldloc.0 brfalse.s next3 ldloc.0 ldc.i4 1 beq.s next3a leave begin next3a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label35 next3: leave done } ldloc.0 brfalse.s next4 ldloc.0 ldc.i4 1 beq.s next4a leave begin next4a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label34 next4: leave done } ldloc.0 brfalse.s next5 ldloc.0 ldc.i4 1 beq.s next5a leave begin next5a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label33 next5: leave done } ldloc.0 brfalse.s next6 ldloc.0 ldc.i4 1 beq.s next6a leave begin next6a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label32 next6: leave done } ldloc.0 brfalse.s next7 ldloc.0 ldc.i4 1 beq.s next7a leave begin next7a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label31 next7: leave done } ldloc.0 brfalse.s next8 ldloc.0 ldc.i4 1 beq.s next8a leave begin next8a: ldstr "Unreached !!" 
call void [System.Console]System.Console::WriteLine(string) leave done leave label30 next8: leave done } ldloc.0 brfalse.s next9 ldloc.0 ldc.i4 1 beq.s next9a leave begin next9a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label29 next9: leave done } ldloc.0 brfalse.s next10 ldloc.0 ldc.i4 1 beq.s next10a leave begin next10a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label28 next10: leave done } ldloc.0 brfalse.s next11 ldloc.0 ldc.i4 1 beq.s next11a leave begin next11a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label27 next11: leave done } ldloc.0 brfalse.s next12 ldloc.0 ldc.i4 1 beq.s next12a leave begin next12a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label26 next12: leave done } ldloc.0 brfalse.s next13 ldloc.0 ldc.i4 1 beq.s next13a leave begin next13a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label25 next13: leave done } ldloc.0 brfalse.s next14 ldloc.0 ldc.i4 1 beq.s next14a leave begin next14a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label24 next14: leave done } ldloc.0 brfalse.s next15 ldloc.0 ldc.i4 1 beq.s next15a leave begin next15a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label23 next15: leave done } ldloc.0 brfalse.s next16 ldloc.0 ldc.i4 1 beq.s next16a leave begin next16a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label22 next16: leave done } ldloc.0 brfalse.s next17 ldloc.0 ldc.i4 1 beq.s next17a leave begin next17a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label21 next17: leave done } ldloc.0 brfalse.s next18 ldloc.0 ldc.i4 1 beq.s next18a leave begin next18a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label20 next18: leave done } ldloc.0 brfalse.s next19 ldloc.0 ldc.i4 1 beq.s next19a leave begin next19a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label19 next19: leave done } ldloc.0 brfalse.s next20 ldloc.0 ldc.i4 1 beq.s next20a leave begin next20a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label18 next20: leave done } ldloc.0 brfalse.s next21 ldloc.0 ldc.i4 1 beq.s next21a leave begin next21a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label17 next21: leave done } ldloc.0 brfalse.s next22 ldloc.0 ldc.i4 1 beq.s next22a leave begin next22a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label16 next22: leave done } ldloc.0 brfalse.s next23 ldloc.0 ldc.i4 1 beq.s next23a leave begin next23a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label15 next23: leave done } ldloc.0 brfalse.s next24 ldloc.0 ldc.i4 1 beq.s next24a leave begin next24a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label14 next24: leave done } ldloc.0 brfalse.s next25 ldloc.0 ldc.i4 1 beq.s next25a leave begin next25a: ldstr "Unreached !!" 
call void [System.Console]System.Console::WriteLine(string) leave done leave label13 next25: leave done } ldloc.0 brfalse.s next26 ldloc.0 ldc.i4 1 beq.s next26a leave begin next26a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label12 next26: leave done } ldloc.0 brfalse.s next27 ldloc.0 ldc.i4 1 beq.s next27a leave begin next27a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label11 next27: leave done } ldloc.0 brfalse.s next28 ldloc.0 ldc.i4 1 beq.s next28a leave begin next28a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label10 next28: leave done } ldloc.0 brfalse.s next29 ldloc.0 ldc.i4 1 beq.s next29a leave begin next29a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label9 next29: leave done } ldloc.0 brfalse.s next30 ldloc.0 ldc.i4 1 beq.s next30a leave begin next30a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label8 next30: leave done } ldloc.0 brfalse.s next31 ldloc.0 ldc.i4 1 beq.s next31a leave begin next31a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label7 next31: leave done } ldloc.0 brfalse.s next32 ldloc.0 ldc.i4 1 beq.s next32a leave begin next32a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label6 next32: leave done } ldloc.0 brfalse.s next33 ldloc.0 ldc.i4 1 beq.s next33a leave begin next33a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label5 next33: leave done } ldloc.0 brfalse.s next34 ldloc.0 ldc.i4 1 beq.s next34a leave begin next34a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label4 next34: leave done } ldloc.0 brfalse.s next35 ldloc.0 ldc.i4 1 beq.s next35a leave begin next35a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label3 next35: leave done } ldloc.0 brfalse.s next36 ldloc.0 ldc.i4 1 beq.s next36a leave begin next36a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label2 next36: leave done } ldloc.0 brfalse.s next37 ldloc.0 ldc.i4 1 beq.s next37a leave begin next37a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label1 next37: leave done } ldloc.0 brfalse.s next38 ldloc.0 ldc.i4 1 beq.s next38a leave begin next38a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label0 next38: leave done } ldloc.0 brfalse.s next39 ldloc.0 ldc.i4 1 beq.s next39a leave begin next39a: ldstr "Unreached !!" 
call void [System.Console]System.Console::WriteLine(string) leave done leave label0 next39: leave done } } finally { ldstr "In finally 9" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 8" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 7" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 6" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 5" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 4" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 3" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 2" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 1" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 0" call void [System.Console]System.Console::WriteLine(string) endfinally } done: ldloc.s testLog callvirt instance void [eh_common]TestUtil.TestLog::StopRecording() ldloc.s testLog callvirt instance int32 [eh_common]TestUtil.TestLog::VerifyOutput() ret } }
"In filter 45" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 5" call void [System.Console]System.Console::WriteLine(string) label6: .try { ldstr "In try 6, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 44" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 6" call void [System.Console]System.Console::WriteLine(string) label7: .try { ldstr "In try 7, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 43" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 7" call void [System.Console]System.Console::WriteLine(string) label8: .try { ldstr "In try 8, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 42" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 8" call void [System.Console]System.Console::WriteLine(string) label9: .try { ldstr "In try 9, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 41" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 9" call void [System.Console]System.Console::WriteLine(string) label10: .try { ldstr "In try 10, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 40" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 10" call void [System.Console]System.Console::WriteLine(string) label11: .try { ldstr "In try 11, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 39" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 11" call void [System.Console]System.Console::WriteLine(string) label12: .try { ldstr "In try 12, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 38" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 12" call void [System.Console]System.Console::WriteLine(string) label13: .try { ldstr "In try 13, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void 
[System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 37" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 13" call void [System.Console]System.Console::WriteLine(string) label14: .try { ldstr "In try 14, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 36" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 14" call void [System.Console]System.Console::WriteLine(string) label15: .try { ldstr "In try 15, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 35" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 15" call void [System.Console]System.Console::WriteLine(string) label16: .try { ldstr "In try 16, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 34" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 16" call void [System.Console]System.Console::WriteLine(string) label17: .try { ldstr "In try 17, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 33" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 17" call void [System.Console]System.Console::WriteLine(string) label18: .try { ldstr "In try 18, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 32" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 18" call void [System.Console]System.Console::WriteLine(string) label19: .try { ldstr "In try 19, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 31" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 19" call void [System.Console]System.Console::WriteLine(string) label20: .try { ldstr "In try 20, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 30" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 20" call void 
[System.Console]System.Console::WriteLine(string) label21: .try { ldstr "In try 21, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 29" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 21" call void [System.Console]System.Console::WriteLine(string) label22: .try { ldstr "In try 22, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 28" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 22" call void [System.Console]System.Console::WriteLine(string) label23: .try { ldstr "In try 23, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 27" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 23" call void [System.Console]System.Console::WriteLine(string) label24: .try { ldstr "In try 24, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 26" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 24" call void [System.Console]System.Console::WriteLine(string) label25: .try { ldstr "In try 25, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 25" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 25" call void [System.Console]System.Console::WriteLine(string) label26: .try { ldstr "In try 26, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 24" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 26" call void [System.Console]System.Console::WriteLine(string) label27: .try { ldstr "In try 27, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 23" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 27" call void [System.Console]System.Console::WriteLine(string) label28: .try { ldstr "In try 28, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { 
pop ldstr "In filter 22" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 28" call void [System.Console]System.Console::WriteLine(string) label29: .try { ldstr "In try 29, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 21" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 29" call void [System.Console]System.Console::WriteLine(string) label30: .try { ldstr "In try 30, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 20" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 30" call void [System.Console]System.Console::WriteLine(string) label31: .try { ldstr "In try 31, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 19" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 31" call void [System.Console]System.Console::WriteLine(string) label32: .try { ldstr "In try 32, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 18" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 32" call void [System.Console]System.Console::WriteLine(string) label33: .try { ldstr "In try 33, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 17" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 33" call void [System.Console]System.Console::WriteLine(string) label34: .try { ldstr "In try 34, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 16" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 34" call void [System.Console]System.Console::WriteLine(string) label35: .try { ldstr "In try 35, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 15" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 35" call void [System.Console]System.Console::WriteLine(string) label36: .try { ldstr "In try 36, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void 
[System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 14" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 36" call void [System.Console]System.Console::WriteLine(string) label37: .try { ldstr "In try 37, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 13" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 37" call void [System.Console]System.Console::WriteLine(string) label38: .try { ldstr "In try 38, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 12" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 38" call void [System.Console]System.Console::WriteLine(string) label39: .try { ldstr "In try 39, ldloc.0 is " call void [System.Console]System.Console::Write(string) ldloc.0 dup call void [System.Console]System.Console::WriteLine(int32) ldc.i4.1 add stloc.0 newobj instance void [mscorlib]System.Exception::.ctor() throw } filter { pop ldstr "In filter 11" call void [System.Console]System.Console::WriteLine(string) ldc.i4 1 endfilter } { pop ldstr "In except 39" call void [System.Console]System.Console::WriteLine(string) ldloc.0 brfalse.s next0 ldloc.0 ldc.i4 1 beq.s next0a leave begin next0a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label38 next0: leave done } ldloc.0 brfalse.s next1 ldloc.0 ldc.i4 1 beq.s next1a leave begin next1a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label37 next1: leave done } ldloc.0 brfalse.s next2 ldloc.0 ldc.i4 1 beq.s next2a leave begin next2a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label36 next2: leave done } ldloc.0 brfalse.s next3 ldloc.0 ldc.i4 1 beq.s next3a leave begin next3a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label35 next3: leave done } ldloc.0 brfalse.s next4 ldloc.0 ldc.i4 1 beq.s next4a leave begin next4a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label34 next4: leave done } ldloc.0 brfalse.s next5 ldloc.0 ldc.i4 1 beq.s next5a leave begin next5a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label33 next5: leave done } ldloc.0 brfalse.s next6 ldloc.0 ldc.i4 1 beq.s next6a leave begin next6a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label32 next6: leave done } ldloc.0 brfalse.s next7 ldloc.0 ldc.i4 1 beq.s next7a leave begin next7a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label31 next7: leave done } ldloc.0 brfalse.s next8 ldloc.0 ldc.i4 1 beq.s next8a leave begin next8a: ldstr "Unreached !!" 
call void [System.Console]System.Console::WriteLine(string) leave done leave label30 next8: leave done } ldloc.0 brfalse.s next9 ldloc.0 ldc.i4 1 beq.s next9a leave begin next9a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label29 next9: leave done } ldloc.0 brfalse.s next10 ldloc.0 ldc.i4 1 beq.s next10a leave begin next10a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label28 next10: leave done } ldloc.0 brfalse.s next11 ldloc.0 ldc.i4 1 beq.s next11a leave begin next11a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label27 next11: leave done } ldloc.0 brfalse.s next12 ldloc.0 ldc.i4 1 beq.s next12a leave begin next12a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label26 next12: leave done } ldloc.0 brfalse.s next13 ldloc.0 ldc.i4 1 beq.s next13a leave begin next13a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label25 next13: leave done } ldloc.0 brfalse.s next14 ldloc.0 ldc.i4 1 beq.s next14a leave begin next14a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label24 next14: leave done } ldloc.0 brfalse.s next15 ldloc.0 ldc.i4 1 beq.s next15a leave begin next15a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label23 next15: leave done } ldloc.0 brfalse.s next16 ldloc.0 ldc.i4 1 beq.s next16a leave begin next16a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label22 next16: leave done } ldloc.0 brfalse.s next17 ldloc.0 ldc.i4 1 beq.s next17a leave begin next17a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label21 next17: leave done } ldloc.0 brfalse.s next18 ldloc.0 ldc.i4 1 beq.s next18a leave begin next18a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label20 next18: leave done } ldloc.0 brfalse.s next19 ldloc.0 ldc.i4 1 beq.s next19a leave begin next19a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label19 next19: leave done } ldloc.0 brfalse.s next20 ldloc.0 ldc.i4 1 beq.s next20a leave begin next20a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label18 next20: leave done } ldloc.0 brfalse.s next21 ldloc.0 ldc.i4 1 beq.s next21a leave begin next21a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label17 next21: leave done } ldloc.0 brfalse.s next22 ldloc.0 ldc.i4 1 beq.s next22a leave begin next22a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label16 next22: leave done } ldloc.0 brfalse.s next23 ldloc.0 ldc.i4 1 beq.s next23a leave begin next23a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label15 next23: leave done } ldloc.0 brfalse.s next24 ldloc.0 ldc.i4 1 beq.s next24a leave begin next24a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label14 next24: leave done } ldloc.0 brfalse.s next25 ldloc.0 ldc.i4 1 beq.s next25a leave begin next25a: ldstr "Unreached !!" 
call void [System.Console]System.Console::WriteLine(string) leave done leave label13 next25: leave done } ldloc.0 brfalse.s next26 ldloc.0 ldc.i4 1 beq.s next26a leave begin next26a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label12 next26: leave done } ldloc.0 brfalse.s next27 ldloc.0 ldc.i4 1 beq.s next27a leave begin next27a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label11 next27: leave done } ldloc.0 brfalse.s next28 ldloc.0 ldc.i4 1 beq.s next28a leave begin next28a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label10 next28: leave done } ldloc.0 brfalse.s next29 ldloc.0 ldc.i4 1 beq.s next29a leave begin next29a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label9 next29: leave done } ldloc.0 brfalse.s next30 ldloc.0 ldc.i4 1 beq.s next30a leave begin next30a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label8 next30: leave done } ldloc.0 brfalse.s next31 ldloc.0 ldc.i4 1 beq.s next31a leave begin next31a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label7 next31: leave done } ldloc.0 brfalse.s next32 ldloc.0 ldc.i4 1 beq.s next32a leave begin next32a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label6 next32: leave done } ldloc.0 brfalse.s next33 ldloc.0 ldc.i4 1 beq.s next33a leave begin next33a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label5 next33: leave done } ldloc.0 brfalse.s next34 ldloc.0 ldc.i4 1 beq.s next34a leave begin next34a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label4 next34: leave done } ldloc.0 brfalse.s next35 ldloc.0 ldc.i4 1 beq.s next35a leave begin next35a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label3 next35: leave done } ldloc.0 brfalse.s next36 ldloc.0 ldc.i4 1 beq.s next36a leave begin next36a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label2 next36: leave done } ldloc.0 brfalse.s next37 ldloc.0 ldc.i4 1 beq.s next37a leave begin next37a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label1 next37: leave done } ldloc.0 brfalse.s next38 ldloc.0 ldc.i4 1 beq.s next38a leave begin next38a: ldstr "Unreached !!" call void [System.Console]System.Console::WriteLine(string) leave done leave label0 next38: leave done } ldloc.0 brfalse.s next39 ldloc.0 ldc.i4 1 beq.s next39a leave begin next39a: ldstr "Unreached !!" 
call void [System.Console]System.Console::WriteLine(string) leave done leave label0 next39: leave done } } finally { ldstr "In finally 9" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 8" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 7" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 6" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 5" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 4" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 3" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 2" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 1" call void [System.Console]System.Console::WriteLine(string) endfinally } } finally { ldstr "In finally 0" call void [System.Console]System.Console::WriteLine(string) endfinally } done: ldloc.s testLog callvirt instance void [eh_common]TestUtil.TestLog::StopRecording() ldloc.s testLog callvirt instance int32 [eh_common]TestUtil.TestLog::VerifyOutput() ret } }
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
./src/tests/JIT/IL_Conformance/Old/Conformance_Base/popi.il
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern legacy library mscorlib {}
.class public Dupptr {
    .field public static int32 FOO
    .method public static int32 main(class [mscorlib]System.String[]) {
        .entrypoint
        .maxstack 20
        ldc.i4 0xFAFB0C0D
        ldsflda int32 Dupptr::FOO
        pop
        ldc.i4 0xFAFB0C0D
        ceq
        brfalse FAIL
        ldc.i4 100
        ret
    FAIL:
        ldc.i4 0x0
        ret
    }
}
.assembly popi{}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern legacy library mscorlib {}
.class public Dupptr {
    .field public static int32 FOO
    .method public static int32 main(class [mscorlib]System.String[]) {
        .entrypoint
        .maxstack 20
        ldc.i4 0xFAFB0C0D
        ldsflda int32 Dupptr::FOO
        pop
        ldc.i4 0xFAFB0C0D
        ceq
        brfalse FAIL
        ldc.i4 100
        ret
    FAIL:
        ldc.i4 0x0
        ret
    }
}
.assembly popi{}
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
./src/libraries/System.Private.Xml.Linq/tests/SDMSample/SDMMisc.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Xml; using System.Xml.Linq; using Xunit; namespace XDocumentTests.SDMSample { public class SDM_Misc { [Fact] public void NodeTypes() { XDocument document = new XDocument(); XElement element = new XElement("x"); XText text = new XText("text-value"); XComment comment = new XComment("comment"); XProcessingInstruction processingInstruction = new XProcessingInstruction("target", "data"); Assert.Equal(XmlNodeType.Document, document.NodeType); Assert.Equal(XmlNodeType.Element, element.NodeType); Assert.Equal(XmlNodeType.Text, text.NodeType); Assert.Equal(XmlNodeType.Comment, comment.NodeType); Assert.Equal(XmlNodeType.ProcessingInstruction, processingInstruction.NodeType); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Xml; using System.Xml.Linq; using Xunit; namespace XDocumentTests.SDMSample { public class SDM_Misc { [Fact] public void NodeTypes() { XDocument document = new XDocument(); XElement element = new XElement("x"); XText text = new XText("text-value"); XComment comment = new XComment("comment"); XProcessingInstruction processingInstruction = new XProcessingInstruction("target", "data"); Assert.Equal(XmlNodeType.Document, document.NodeType); Assert.Equal(XmlNodeType.Element, element.NodeType); Assert.Equal(XmlNodeType.Text, text.NodeType); Assert.Equal(XmlNodeType.Comment, comment.NodeType); Assert.Equal(XmlNodeType.ProcessingInstruction, processingInstruction.NodeType); } } }
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
./eng/pipelines/common/templates/runtimes/send-to-helix-inner-step.yml
parameters: osGroup: '' restoreParams: '' sendParams: '' condition: '' displayName: '' environment: {} shouldContinueOnError: false steps: - ${{ if eq(parameters.osGroup, 'windows') }}: # TODO: Remove and consolidate this when we move to arcade via init-tools.cmd. - powershell: $(Build.SourcesDirectory)\eng\common\build.ps1 -ci ${{ parameters.restoreParams }} displayName: Restore blob feed tasks (Windows) condition: and(succeeded(), ${{ and(ne(parameters.condition, false), ne(parameters.restoreParams, '')) }}) - powershell: $(Build.SourcesDirectory)\eng\common\msbuild.ps1 -ci -warnaserror 0 ${{ parameters.sendParams }} displayName: ${{ parameters.displayName }} (Windows) condition: and(succeeded(), ${{ and(ne(parameters.condition, false), ne(parameters.sendParams, '')) }}) env: ${{ parameters.environment }} continueOnError: ${{ eq(parameters.shouldContinueOnError, true) }} - ${{ if ne(parameters.osGroup, 'windows') }}: # TODO: Remove and consolidate this when we move to arcade via init-tools.sh. - script: $(Build.SourcesDirectory)/eng/common/build.sh --ci --warnaserror false ${{ parameters.restoreParams }} displayName: Restore blob feed tasks (Unix) condition: and(succeeded(), ${{ and(ne(parameters.condition, false), ne(parameters.restoreParams, '')) }}) ${{ if eq(parameters.osGroup, 'FreeBSD') }}: env: # Arcade uses this SDK instead of trying to restore one. DotNetCoreSdkDir: /usr/local/dotnet - script: $(Build.SourcesDirectory)/eng/common/msbuild.sh --ci ${{ parameters.sendParams }} displayName: ${{ parameters.displayName }} (Unix) condition: and(succeeded(), ${{ and(ne(parameters.condition, false), ne(parameters.sendParams, '')) }}) env: ${{ parameters.environment }} continueOnError: ${{ eq(parameters.shouldContinueOnError, true) }}
parameters: osGroup: '' restoreParams: '' sendParams: '' condition: '' displayName: '' environment: {} shouldContinueOnError: false steps: - ${{ if eq(parameters.osGroup, 'windows') }}: # TODO: Remove and consolidate this when we move to arcade via init-tools.cmd. - powershell: $(Build.SourcesDirectory)\eng\common\build.ps1 -ci ${{ parameters.restoreParams }} displayName: Restore blob feed tasks (Windows) condition: and(succeeded(), ${{ and(ne(parameters.condition, false), ne(parameters.restoreParams, '')) }}) - powershell: $(Build.SourcesDirectory)\eng\common\msbuild.ps1 -ci -warnaserror 0 ${{ parameters.sendParams }} displayName: ${{ parameters.displayName }} (Windows) condition: and(succeeded(), ${{ and(ne(parameters.condition, false), ne(parameters.sendParams, '')) }}) env: ${{ parameters.environment }} continueOnError: ${{ eq(parameters.shouldContinueOnError, true) }} - ${{ if ne(parameters.osGroup, 'windows') }}: # TODO: Remove and consolidate this when we move to arcade via init-tools.sh. - script: $(Build.SourcesDirectory)/eng/common/build.sh --ci --warnaserror false ${{ parameters.restoreParams }} displayName: Restore blob feed tasks (Unix) condition: and(succeeded(), ${{ and(ne(parameters.condition, false), ne(parameters.restoreParams, '')) }}) ${{ if eq(parameters.osGroup, 'FreeBSD') }}: env: # Arcade uses this SDK instead of trying to restore one. DotNetCoreSdkDir: /usr/local/dotnet - script: $(Build.SourcesDirectory)/eng/common/msbuild.sh --ci ${{ parameters.sendParams }} displayName: ${{ parameters.displayName }} (Unix) condition: and(succeeded(), ${{ and(ne(parameters.condition, false), ne(parameters.sendParams, '')) }}) env: ${{ parameters.environment }} continueOnError: ${{ eq(parameters.shouldContinueOnError, true) }}
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
./src/coreclr/pal/src/libunwind/doc/unw_is_signal_frame.tex
\documentclass{article} \usepackage[fancyhdr,pdf]{latex2man} \input{common.tex} \begin{document} \begin{Name}{3}{unw\_is\_signal\_frame}{David Mosberger-Tang}{Programming Library}{unw\_is\_signal\_frame}unw\_is\_signal\_frame -- check if current frame is a signal frame \end{Name} \section{Synopsis} \File{\#include $<$libunwind.h$>$}\\ \Type{int} \Func{unw\_is\_signal\_frame}(\Type{unw\_cursor\_t~*}\Var{cp});\\ \section{Description} The \Func{unw\_is\_signal\_frame}() routine returns a positive value if the current frame identified by \Var{cp} is a signal frame, and a value of 0 otherwise. For the purpose of this discussion, a signal frame is a frame that was created in response to a potentially asynchronous interruption. For UNIX and UNIX-like platforms, such frames are normally created by the kernel when delivering a signal. In a kernel-environment, a signal frame might, for example, correspond to a frame created in response to a device interrupt. Signal frames are somewhat unusual because the asynchronous nature of the events that create them require storing the contents of registers that are normally treated as scratch (``caller-saved'') registers. \section{Return Value} On successful completion, \Func{unw\_is\_signal\_frame}() returns a positive value if the current frame is a signal frame, or 0 if it is not. Otherwise, a negative value of one of the error-codes below is returned. \section{Thread and Signal Safety} \Func{unw\_is\_signal\_frame}() is thread-safe as well as safe to use from a signal handler. \section{Errors} \begin{Description} \item[\Const{UNW\_ENOINFO}] \Prog{Libunwind} is unable to determine whether or not the current frame is a signal frame. \end{Description} \section{See Also} \SeeAlso{libunwind(3)}, \SeeAlso{unw\_get\_reg(3)}, \SeeAlso{unw\_set\_reg(3)}, \SeeAlso{unw\_get\_fpreg(3)}, \SeeAlso{unw\_set\_fpreg(3)} \section{Author} \noindent David Mosberger-Tang\\ Email: \Email{[email protected]}\\ WWW: \URL{http://www.nongnu.org/libunwind/}. \LatexManEnd \end{document}
\documentclass{article} \usepackage[fancyhdr,pdf]{latex2man} \input{common.tex} \begin{document} \begin{Name}{3}{unw\_is\_signal\_frame}{David Mosberger-Tang}{Programming Library}{unw\_is\_signal\_frame}unw\_is\_signal\_frame -- check if current frame is a signal frame \end{Name} \section{Synopsis} \File{\#include $<$libunwind.h$>$}\\ \Type{int} \Func{unw\_is\_signal\_frame}(\Type{unw\_cursor\_t~*}\Var{cp});\\ \section{Description} The \Func{unw\_is\_signal\_frame}() routine returns a positive value if the current frame identified by \Var{cp} is a signal frame, and a value of 0 otherwise. For the purpose of this discussion, a signal frame is a frame that was created in response to a potentially asynchronous interruption. For UNIX and UNIX-like platforms, such frames are normally created by the kernel when delivering a signal. In a kernel-environment, a signal frame might, for example, correspond to a frame created in response to a device interrupt. Signal frames are somewhat unusual because the asynchronous nature of the events that create them require storing the contents of registers that are normally treated as scratch (``caller-saved'') registers. \section{Return Value} On successful completion, \Func{unw\_is\_signal\_frame}() returns a positive value if the current frame is a signal frame, or 0 if it is not. Otherwise, a negative value of one of the error-codes below is returned. \section{Thread and Signal Safety} \Func{unw\_is\_signal\_frame}() is thread-safe as well as safe to use from a signal handler. \section{Errors} \begin{Description} \item[\Const{UNW\_ENOINFO}] \Prog{Libunwind} is unable to determine whether or not the current frame is a signal frame. \end{Description} \section{See Also} \SeeAlso{libunwind(3)}, \SeeAlso{unw\_get\_reg(3)}, \SeeAlso{unw\_set\_reg(3)}, \SeeAlso{unw\_get\_fpreg(3)}, \SeeAlso{unw\_set\_fpreg(3)} \section{Author} \noindent David Mosberger-Tang\\ Email: \Email{[email protected]}\\ WWW: \URL{http://www.nongnu.org/libunwind/}. \LatexManEnd \end{document}
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
./src/libraries/Microsoft.Extensions.Configuration/tests/ConfigurationPathComparerTest.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Xunit; namespace Microsoft.Extensions.Configuration.Test { public class ConfigurationPathComparerTest { [Fact] public void CompareWithNull() { ComparerTest(null, null, 0); ComparerTest(null, "a", -1); ComparerTest("b", null, 1); } [Fact] public void CompareWithSameLength() { ComparerTest("a", "a", 0); ComparerTest("a", "A", 0); ComparerTest("aB", "Ab", 0); } [Fact] public void CompareWithDifferentLengths() { ComparerTest("a", "aa", -1); ComparerTest("aa", "a", 1); } [Fact] public void CompareWithLetters() { ComparerTest("a", "b", -1); ComparerTest("b", "a", 1); } [Fact] public void CompareWithNumbers() { ComparerTest("000", "0", 0); ComparerTest("001", "1", 0); ComparerTest("1", "1", 0); ComparerTest("1", "10", -1); ComparerTest("10", "1", 1); ComparerTest("2", "10", -1); ComparerTest("10", "2", 1); } [Fact] public void CompareWithNumbersAndLetters() { ComparerTest("1", "a", -1); ComparerTest("a", "1", 1); ComparerTest("100", "a", -1); ComparerTest("a", "100", 1); } [Fact] public void CompareWithNonNumbers() { ComparerTest("1a", "100", 1); ComparerTest("100", "1a", -1); ComparerTest("100a", "100", 1); ComparerTest("100", "100a", -1); ComparerTest("a100", "100", 1); ComparerTest("100", "a100", -1); ComparerTest("1a", "a", -1); ComparerTest("a", "1a", 1); } [Fact] public void CompareIdenticalPaths() { ComparerTest("abc:DEF:0:a100", "ABC:DEF:0:a100", 0); } [Fact] public void CompareDifferentPaths() { ComparerTest("abc:def", "ghi:2", -1); ComparerTest("ghi:2", "abc:def", 1); } [Fact] public void ComparePathsWithCommonPart() { ComparerTest("abc:def:XYQ", "abc:def:XYZ", -1); ComparerTest("abc:def:XYZ", "abc:def:XYQ", 1); } [Fact] public void ComparePathsWithCommonPartButShorter() { ComparerTest("abc:def", "abc:def:ghi", -1); ComparerTest("abc:def:ghi", "abc:def", 1); } [Fact] public void ComparePathsWithIndicesAtTheEnd() { ComparerTest("abc:def:2", "abc:def:10", -1); ComparerTest("abc:def:10", "abc:def:2", 1); ComparerTest("abc:def:10", "abc:def:22", -1); ComparerTest("abc:def:22", "abc:def:10", 1); } [Fact] public void ComparePathsWithIndicesInside() { ComparerTest("abc:def:1000:jkl", "abc:def:ghi:jkl", -1); ComparerTest("abc:def:ghi:jkl", "abc:def:1000:jkl", 1); ComparerTest("abc:def:10:jkl", "abc:def:22:jkl", -1); ComparerTest("abc:def:22:jkl", "abc:def:10:jkl", 1); } private static void ComparerTest(string a, string b, int expectedSign) { var result = ConfigurationKeyComparer.Instance.Compare(a, b); Assert.Equal(expectedSign, Math.Sign(result)); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Xunit; namespace Microsoft.Extensions.Configuration.Test { public class ConfigurationPathComparerTest { [Fact] public void CompareWithNull() { ComparerTest(null, null, 0); ComparerTest(null, "a", -1); ComparerTest("b", null, 1); } [Fact] public void CompareWithSameLength() { ComparerTest("a", "a", 0); ComparerTest("a", "A", 0); ComparerTest("aB", "Ab", 0); } [Fact] public void CompareWithDifferentLengths() { ComparerTest("a", "aa", -1); ComparerTest("aa", "a", 1); } [Fact] public void CompareWithLetters() { ComparerTest("a", "b", -1); ComparerTest("b", "a", 1); } [Fact] public void CompareWithNumbers() { ComparerTest("000", "0", 0); ComparerTest("001", "1", 0); ComparerTest("1", "1", 0); ComparerTest("1", "10", -1); ComparerTest("10", "1", 1); ComparerTest("2", "10", -1); ComparerTest("10", "2", 1); } [Fact] public void CompareWithNumbersAndLetters() { ComparerTest("1", "a", -1); ComparerTest("a", "1", 1); ComparerTest("100", "a", -1); ComparerTest("a", "100", 1); } [Fact] public void CompareWithNonNumbers() { ComparerTest("1a", "100", 1); ComparerTest("100", "1a", -1); ComparerTest("100a", "100", 1); ComparerTest("100", "100a", -1); ComparerTest("a100", "100", 1); ComparerTest("100", "a100", -1); ComparerTest("1a", "a", -1); ComparerTest("a", "1a", 1); } [Fact] public void CompareIdenticalPaths() { ComparerTest("abc:DEF:0:a100", "ABC:DEF:0:a100", 0); } [Fact] public void CompareDifferentPaths() { ComparerTest("abc:def", "ghi:2", -1); ComparerTest("ghi:2", "abc:def", 1); } [Fact] public void ComparePathsWithCommonPart() { ComparerTest("abc:def:XYQ", "abc:def:XYZ", -1); ComparerTest("abc:def:XYZ", "abc:def:XYQ", 1); } [Fact] public void ComparePathsWithCommonPartButShorter() { ComparerTest("abc:def", "abc:def:ghi", -1); ComparerTest("abc:def:ghi", "abc:def", 1); } [Fact] public void ComparePathsWithIndicesAtTheEnd() { ComparerTest("abc:def:2", "abc:def:10", -1); ComparerTest("abc:def:10", "abc:def:2", 1); ComparerTest("abc:def:10", "abc:def:22", -1); ComparerTest("abc:def:22", "abc:def:10", 1); } [Fact] public void ComparePathsWithIndicesInside() { ComparerTest("abc:def:1000:jkl", "abc:def:ghi:jkl", -1); ComparerTest("abc:def:ghi:jkl", "abc:def:1000:jkl", 1); ComparerTest("abc:def:10:jkl", "abc:def:22:jkl", -1); ComparerTest("abc:def:22:jkl", "abc:def:10:jkl", 1); } private static void ComparerTest(string a, string b, int expectedSign) { var result = ConfigurationKeyComparer.Instance.Compare(a, b); Assert.Equal(expectedSign, Math.Sign(result)); } } }
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
./src/tests/JIT/jit64/opt/cse/staticFieldExpr1_ro_loop.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <!-- Set to 'Full' if the Debug? column is marked in the spreadsheet. Leave blank otherwise. --> <DebugType>None</DebugType> <Optimize>True</Optimize> <NoStandardLib>True</NoStandardLib> <Noconfig>True</Noconfig> <DefineConstants>$(DefineConstants);LOOP</DefineConstants> </PropertyGroup> <ItemGroup> <Compile Include="staticFieldExpr1.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <!-- Set to 'Full' if the Debug? column is marked in the spreadsheet. Leave blank otherwise. --> <DebugType>None</DebugType> <Optimize>True</Optimize> <NoStandardLib>True</NoStandardLib> <Noconfig>True</Noconfig> <DefineConstants>$(DefineConstants);LOOP</DefineConstants> </PropertyGroup> <ItemGroup> <Compile Include="staticFieldExpr1.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/fft17.txt
<?xml version="1.0" encoding="utf-8"?>Hello, world!
<?xml version="1.0" encoding="utf-8"?>Hello, world!
-1
dotnet/runtime
66,211
[mono] Remove SkipVerification support from the runtime
CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
akoeplinger
2022-03-04T19:47:04Z
2022-03-06T13:44:33Z
b463b1630dbf1be5b013208a9fa73e1ecd6c774c
be629f49a350d526de2c65981294734cee420b90
[mono] Remove SkipVerification support from the runtime. CAS support was removed in .NET Core. This allows us to remove a bunch of unused code, e.g. the dependency on libiconv.
./src/tests/Loader/classloader/TypeInitialization/CircularCctors/CircularCctorThreeThreads01.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /* A --> B --> C --> D --> E --> A 3 threads: Thread T1 starts initialization at A, thread T2 starts initialization at C, and thread T3 starts initialization at E. This should form a three thread deadlock, which we will detect and allow one of the three threads to proceed, breaking the deadlock. */ using System; using System.Threading; using System.Runtime.CompilerServices; public class A { public static int i; static A() { Console.WriteLine("In A.cctor: thread {0}: B.i {1}",Thread.CurrentThread.Name,B.i); A.i = 5; } // invoking this should trigger the cctor [MethodImpl(MethodImplOptions.NoInlining)] public static void SomeMethod() { Console.WriteLine("In MyClass.SomeMethod(): thread {0}",Thread.CurrentThread.Name); } } public class B { public static int i; static B() { Console.WriteLine("In B.cctor: thread {0}: C.i {1}",Thread.CurrentThread.Name,C.i); B.i = 6; } // invoking this should trigger the cctor [MethodImpl(MethodImplOptions.NoInlining)] public static void SomeMethod() { Console.WriteLine("In MyClass.SomeMethod(): thread {0}",Thread.CurrentThread.Name); } } public class C { public static int i; static C() { Console.WriteLine("In C.cctor: thread {0}: D.i {1}",Thread.CurrentThread.Name,D.i); C.i = 7; } // invoking this should trigger the cctor [MethodImpl(MethodImplOptions.NoInlining)] public static void SomeMethod() { Console.WriteLine("In MyClass.SomeMethod(): thread {0}",Thread.CurrentThread.Name); } } public class D { public static int i; static D() { Console.WriteLine("In D.cctor: thread {0}: E.i {1}",Thread.CurrentThread.Name,E.i); D.i = 8; } // invoking this should trigger the cctor [MethodImpl(MethodImplOptions.NoInlining)] public static void SomeMethod() { Console.WriteLine("In MyClass.SomeMethod(): thread {0}",Thread.CurrentThread.Name); } } public class E { public static int i; static E() { Console.WriteLine("In E.cctor: thread {0}: A.i {1}",Thread.CurrentThread.Name,A.i); E.i = 9; } // invoking this should trigger the cctor [MethodImpl(MethodImplOptions.NoInlining)] public static void SomeMethod() { Console.WriteLine("In MyClass.SomeMethod(): thread {0}",Thread.CurrentThread.Name); } } public class Test_CircularCctorThreeThreads01 { public static void RunGetA() { A.SomeMethod(); } public static void RunGetC() { C.SomeMethod(); } public static void RunGetE() { E.SomeMethod(); } public static int Main() { Thread t1 = new Thread(RunGetA); t1.Name = "T1"; Thread t2 = new Thread(RunGetC); t2.Name = "T2"; Thread t3 = new Thread(RunGetE); t3.Name = "T3"; t1.Start(); Thread.Sleep(1000*1); // 1 second t2.Start(); Thread.Sleep(1000*1); // 1 second t3.Start(); t3.Join(); t2.Join(); t1.Join(); // make sure that statics were set correctly if ( A.i == 5 && B.i == 6 && C.i == 7 && D.i == 8 && E.i == 9 ) { Console.WriteLine("PASS"); return 100; } else { Console.WriteLine("FAIL"); return 101; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /* A --> B --> C --> D --> E --> A 3 threads: Thread T1 starts initialization at A, thread T2 starts initialization at C, and thread T3 starts initialization at E. This should form a three thread deadlock, which we will detect and allow one of the three threads to proceed, breaking the deadlock. */ using System; using System.Threading; using System.Runtime.CompilerServices; public class A { public static int i; static A() { Console.WriteLine("In A.cctor: thread {0}: B.i {1}",Thread.CurrentThread.Name,B.i); A.i = 5; } // invoking this should trigger the cctor [MethodImpl(MethodImplOptions.NoInlining)] public static void SomeMethod() { Console.WriteLine("In MyClass.SomeMethod(): thread {0}",Thread.CurrentThread.Name); } } public class B { public static int i; static B() { Console.WriteLine("In B.cctor: thread {0}: C.i {1}",Thread.CurrentThread.Name,C.i); B.i = 6; } // invoking this should trigger the cctor [MethodImpl(MethodImplOptions.NoInlining)] public static void SomeMethod() { Console.WriteLine("In MyClass.SomeMethod(): thread {0}",Thread.CurrentThread.Name); } } public class C { public static int i; static C() { Console.WriteLine("In C.cctor: thread {0}: D.i {1}",Thread.CurrentThread.Name,D.i); C.i = 7; } // invoking this should trigger the cctor [MethodImpl(MethodImplOptions.NoInlining)] public static void SomeMethod() { Console.WriteLine("In MyClass.SomeMethod(): thread {0}",Thread.CurrentThread.Name); } } public class D { public static int i; static D() { Console.WriteLine("In D.cctor: thread {0}: E.i {1}",Thread.CurrentThread.Name,E.i); D.i = 8; } // invoking this should trigger the cctor [MethodImpl(MethodImplOptions.NoInlining)] public static void SomeMethod() { Console.WriteLine("In MyClass.SomeMethod(): thread {0}",Thread.CurrentThread.Name); } } public class E { public static int i; static E() { Console.WriteLine("In E.cctor: thread {0}: A.i {1}",Thread.CurrentThread.Name,A.i); E.i = 9; } // invoking this should trigger the cctor [MethodImpl(MethodImplOptions.NoInlining)] public static void SomeMethod() { Console.WriteLine("In MyClass.SomeMethod(): thread {0}",Thread.CurrentThread.Name); } } public class Test_CircularCctorThreeThreads01 { public static void RunGetA() { A.SomeMethod(); } public static void RunGetC() { C.SomeMethod(); } public static void RunGetE() { E.SomeMethod(); } public static int Main() { Thread t1 = new Thread(RunGetA); t1.Name = "T1"; Thread t2 = new Thread(RunGetC); t2.Name = "T2"; Thread t3 = new Thread(RunGetE); t3.Name = "T3"; t1.Start(); Thread.Sleep(1000*1); // 1 second t2.Start(); Thread.Sleep(1000*1); // 1 second t3.Start(); t3.Join(); t2.Join(); t1.Join(); // make sure that statics were set correctly if ( A.i == 5 && B.i == 6 && C.i == 7 && D.i == 8 && E.i == 9 ) { Console.WriteLine("PASS"); return 100; } else { Console.WriteLine("FAIL"); return 101; } } }
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
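To make the adaptive placement described above concrete, here is a minimal, self-contained C++ sketch of the decision it implies: prefer source placement when the backedge count is small and every source block can host a patchpoint, otherwise fall back to target placement. All names (Block, BackEdge, PatchpointStrategy, placePatchpoints) and the threshold are illustrative assumptions, not the JIT's actual types, config knobs, or the code in block.cpp.

#include <cstdio>
#include <vector>

// Hypothetical stand-ins for JIT structures; names and values are illustrative only.
struct Block
{
    int  bbNum;
    bool canPlacePatchpointAtSource; // assumed predicate: some blocks cannot host a source patchpoint
};

struct BackEdge
{
    Block* source; // block containing the backward jump
    Block* target; // loop head the jump returns to
};

enum class PatchpointStrategy
{
    Targets, // place patchpoints at backedge targets (loop heads)
    Sources, // place patchpoints at backedge sources
    Adaptive // choose per method based on the number of backedges
};

// Decide where patchpoints go for one method. With the adaptive strategy, a small
// number of backedges favors source placement; many backedges (or any source block
// that cannot host a patchpoint) falls back to target placement.
static void placePatchpoints(const std::vector<BackEdge>& backEdges, PatchpointStrategy strategy)
{
    bool useSources = (strategy == PatchpointStrategy::Sources);

    if (strategy == PatchpointStrategy::Adaptive)
    {
        const size_t kSourceLimit = 4; // illustrative threshold, not the JIT's actual value
        useSources = (backEdges.size() <= kSourceLimit);
    }

    if (useSources)
    {
        for (const BackEdge& edge : backEdges)
        {
            if (!edge.source->canPlacePatchpointAtSource)
            {
                useSources = false; // cannot place at this source; revert to targets
                break;
            }
        }
    }

    for (const BackEdge& edge : backEdges)
    {
        Block* host = useSources ? edge.source : edge.target;
        std::printf("patchpoint in BB%02d (%s placement)\n", host->bbNum,
                    useSources ? "source" : "target");
    }
}

int main()
{
    Block head{3, true};
    Block tail{7, true};
    std::vector<BackEdge> edges = {{&tail, &head}};
    placePatchpoints(edges, PatchpointStrategy::Adaptive);
    return 0;
}

Note that the actual change visible in the block.cpp diff below is narrower: dspFlags gains a BBF_BACKWARD_JUMP_SOURCE flag printed as "bwd-src", which is how backedge-source blocks show up in JIT debug dumps.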
./src/coreclr/jit/block.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX BasicBlock XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "jitstd/algorithm.h" #if MEASURE_BLOCK_SIZE /* static */ size_t BasicBlock::s_Size; /* static */ size_t BasicBlock::s_Count; #endif // MEASURE_BLOCK_SIZE #ifdef DEBUG // The max # of tree nodes in any BB /* static */ unsigned BasicBlock::s_nMaxTrees; #endif // DEBUG #ifdef DEBUG flowList* ShuffleHelper(unsigned hash, flowList* res) { flowList* head = res; for (flowList *prev = nullptr; res != nullptr; prev = res, res = res->flNext) { unsigned blkHash = (hash ^ (res->getBlock()->bbNum << 16) ^ res->getBlock()->bbNum); if (((blkHash % 1879) & 1) && prev != nullptr) { // Swap res with head. prev->flNext = head; std::swap(head->flNext, res->flNext); std::swap(head, res); } } return head; } unsigned SsaStressHashHelper() { // hash = 0: turned off, hash = 1: use method hash, hash = *: use custom hash. unsigned hash = JitConfig.JitSsaStress(); if (hash == 0) { return hash; } if (hash == 1) { return JitTls::GetCompiler()->info.compMethodHash(); } return ((hash >> 16) == 0) ? ((hash << 16) | hash) : hash; } #endif EHSuccessorIterPosition::EHSuccessorIterPosition(Compiler* comp, BasicBlock* block) : m_remainingRegSuccs(block->NumSucc(comp)), m_curRegSucc(nullptr), m_curTry(comp->ehGetBlockExnFlowDsc(block)) { // If "block" is a "leave helper" block (the empty BBJ_ALWAYS block that pairs with a // preceding BBJ_CALLFINALLY block to implement a "leave" IL instruction), then no exceptions // can occur within it, so clear m_curTry if it's non-null. if (m_curTry != nullptr) { if (block->isBBCallAlwaysPairTail()) { m_curTry = nullptr; } } if (m_curTry == nullptr && m_remainingRegSuccs > 0) { // Examine the successors to see if any are the start of try blocks. FindNextRegSuccTry(comp, block); } } void EHSuccessorIterPosition::FindNextRegSuccTry(Compiler* comp, BasicBlock* block) { assert(m_curTry == nullptr); // Must now consider the next regular successor, if any. while (m_remainingRegSuccs > 0) { m_remainingRegSuccs--; m_curRegSucc = block->GetSucc(m_remainingRegSuccs, comp); if (comp->bbIsTryBeg(m_curRegSucc)) { assert(m_curRegSucc->hasTryIndex()); // Since it is a try begin. unsigned newTryIndex = m_curRegSucc->getTryIndex(); // If the try region started by "m_curRegSucc" (represented by newTryIndex) contains m_block, // we've already yielded its handler, as one of the EH handler successors of m_block itself. if (comp->bbInExnFlowRegions(newTryIndex, block)) { continue; } // Otherwise, consider this try. m_curTry = comp->ehGetDsc(newTryIndex); break; } } } void EHSuccessorIterPosition::Advance(Compiler* comp, BasicBlock* block) { assert(m_curTry != nullptr); if (m_curTry->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) { m_curTry = comp->ehGetDsc(m_curTry->ebdEnclosingTryIndex); // If we've gone over into considering try's containing successors, // then the enclosing try must have the successor as its first block. 
if (m_curRegSucc == nullptr || m_curTry->ebdTryBeg == m_curRegSucc) { return; } // Otherwise, give up, try the next regular successor. m_curTry = nullptr; } else { m_curTry = nullptr; } // We've exhausted all try blocks. // See if there are any remaining regular successors that start try blocks. FindNextRegSuccTry(comp, block); } BasicBlock* EHSuccessorIterPosition::Current(Compiler* comp, BasicBlock* block) { assert(m_curTry != nullptr); return m_curTry->ExFlowBlock(); } flowList* Compiler::BlockPredsWithEH(BasicBlock* blk) { BlockToFlowListMap* ehPreds = GetBlockToEHPreds(); flowList* res; if (ehPreds->Lookup(blk, &res)) { return res; } res = blk->bbPreds; unsigned tryIndex; if (bbIsExFlowBlock(blk, &tryIndex)) { // Find the first block of the try. EHblkDsc* ehblk = ehGetDsc(tryIndex); BasicBlock* tryStart = ehblk->ebdTryBeg; for (BasicBlock* const tryStartPredBlock : tryStart->PredBlocks()) { res = new (this, CMK_FlowList) flowList(tryStartPredBlock, res); #if MEASURE_BLOCK_SIZE genFlowNodeCnt += 1; genFlowNodeSize += sizeof(flowList); #endif // MEASURE_BLOCK_SIZE } // Now add all blocks handled by this handler (except for second blocks of BBJ_CALLFINALLY/BBJ_ALWAYS pairs; // these cannot cause transfer to the handler...) // TODO-Throughput: It would be nice if we could iterate just over the blocks in the try, via // something like: // for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->bbNext; bb = bb->bbNext) // (plus adding in any filter blocks outside the try whose exceptions are handled here). // That doesn't work, however: funclets have caused us to sometimes split the body of a try into // more than one sequence of contiguous blocks. We need to find a better way to do this. for (BasicBlock* const bb : Blocks()) { if (bbInExnFlowRegions(tryIndex, bb) && !bb->isBBCallAlwaysPairTail()) { res = new (this, CMK_FlowList) flowList(bb, res); #if MEASURE_BLOCK_SIZE genFlowNodeCnt += 1; genFlowNodeSize += sizeof(flowList); #endif // MEASURE_BLOCK_SIZE } } #ifdef DEBUG unsigned hash = SsaStressHashHelper(); if (hash != 0) { res = ShuffleHelper(hash, res); } #endif // DEBUG ehPreds->Set(blk, res); } return res; } //------------------------------------------------------------------------ // checkPredListOrder: see if pred list is properly ordered // // Returns: // false if pred list is not in increasing bbNum order. // bool BasicBlock::checkPredListOrder() { unsigned lastBBNum = 0; for (BasicBlock* const predBlock : PredBlocks()) { const unsigned bbNum = predBlock->bbNum; if (bbNum <= lastBBNum) { assert(bbNum != lastBBNum); return false; } lastBBNum = bbNum; } return true; } //------------------------------------------------------------------------ // ensurePredListOrder: ensure all pred list entries appear in increasing // bbNum order. // // Arguments: // compiler - current compiler instance // void BasicBlock::ensurePredListOrder(Compiler* compiler) { // First, check if list is already in order. // if (checkPredListOrder()) { return; } reorderPredList(compiler); assert(checkPredListOrder()); } //------------------------------------------------------------------------ // reorderPredList: relink pred list in increasing bbNum order. // // Arguments: // compiler - current compiler instance // void BasicBlock::reorderPredList(Compiler* compiler) { // Count number or entries. // int count = 0; for (flowList* const pred : PredEdges()) { count++; } // If only 0 or 1 entry, nothing to reorder. // if (count < 2) { return; } // Allocate sort vector if needed. 
// if (compiler->fgPredListSortVector == nullptr) { CompAllocator allocator = compiler->getAllocator(CMK_FlowList); compiler->fgPredListSortVector = new (allocator) jitstd::vector<flowList*>(allocator); } jitstd::vector<flowList*>* const sortVector = compiler->fgPredListSortVector; sortVector->clear(); // Fill in the vector from the list. // for (flowList* const pred : PredEdges()) { sortVector->push_back(pred); } // Sort by increasing bbNum // struct flowListBBNumCmp { bool operator()(const flowList* f1, const flowList* f2) { return f1->getBlock()->bbNum < f2->getBlock()->bbNum; } }; jitstd::sort(sortVector->begin(), sortVector->end(), flowListBBNumCmp()); // Rethread the list. // flowList* last = nullptr; for (flowList* current : *sortVector) { if (last == nullptr) { bbPreds = current; } else { last->flNext = current; } last = current; } last->flNext = nullptr; // Note this lastPred is only used transiently. // bbLastPred = last; } #ifdef DEBUG //------------------------------------------------------------------------ // dspBlockILRange(): Display the block's IL range as [XXX...YYY), where XXX and YYY might be "???" for BAD_IL_OFFSET. // void BasicBlock::dspBlockILRange() const { if (bbCodeOffs != BAD_IL_OFFSET) { printf("[%03X..", bbCodeOffs); } else { printf("[???" ".."); } if (bbCodeOffsEnd != BAD_IL_OFFSET) { // brace-matching editor workaround for following line: ( printf("%03X)", bbCodeOffsEnd); } else { // brace-matching editor workaround for following line: ( printf("???" ")"); } } //------------------------------------------------------------------------ // dspFlags: Print out the block's flags // void BasicBlock::dspFlags() { if (bbFlags & BBF_VISITED) { printf("v "); } if (bbFlags & BBF_MARKED) { printf("m "); } if (bbFlags & BBF_CHANGED) { printf("! 
"); } if (bbFlags & BBF_REMOVED) { printf("del "); } if (bbFlags & BBF_DONT_REMOVE) { printf("keep "); } if (bbFlags & BBF_IMPORTED) { printf("i "); } if (bbFlags & BBF_INTERNAL) { printf("internal "); } if (bbFlags & BBF_FAILED_VERIFICATION) { printf("failV "); } if (bbFlags & BBF_TRY_BEG) { printf("try "); } if (bbFlags & BBF_RUN_RARELY) { printf("rare "); } if (bbFlags & BBF_LOOP_HEAD) { printf("Loop "); } if (bbFlags & BBF_LOOP_CALL0) { printf("Loop0 "); } if (bbFlags & BBF_LOOP_CALL1) { printf("Loop1 "); } if (bbFlags & BBF_HAS_LABEL) { printf("label "); } if (bbFlags & BBF_HAS_JMP) { printf("jmp "); } if (bbFlags & BBF_HAS_CALL) { printf("hascall "); } if (bbFlags & BBF_GC_SAFE_POINT) { printf("gcsafe "); } if (bbFlags & BBF_FUNCLET_BEG) { printf("flet "); } if (bbFlags & BBF_HAS_IDX_LEN) { printf("idxlen "); } if (bbFlags & BBF_HAS_NEWARRAY) { printf("new[] "); } if (bbFlags & BBF_HAS_NEWOBJ) { printf("newobj "); } if (bbFlags & BBF_HAS_NULLCHECK) { printf("nullcheck "); } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (bbFlags & BBF_FINALLY_TARGET) { printf("ftarget "); } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (bbFlags & BBF_BACKWARD_JUMP) { printf("bwd "); } if (bbFlags & BBF_BACKWARD_JUMP_TARGET) { printf("bwd-target "); } if (bbFlags & BBF_PATCHPOINT) { printf("ppoint "); } if (bbFlags & BBF_PARTIAL_COMPILATION_PATCHPOINT) { printf("pc-ppoint "); } if (bbFlags & BBF_RETLESS_CALL) { printf("retless "); } if (bbFlags & BBF_LOOP_PREHEADER) { printf("LoopPH "); } if (bbFlags & BBF_COLD) { printf("cold "); } if (bbFlags & BBF_PROF_WEIGHT) { printf("IBC "); } if (bbFlags & BBF_IS_LIR) { printf("LIR "); } if (bbFlags & BBF_KEEP_BBJ_ALWAYS) { printf("KEEP "); } if (bbFlags & BBF_CLONED_FINALLY_BEGIN) { printf("cfb "); } if (bbFlags & BBF_CLONED_FINALLY_END) { printf("cfe "); } if (bbFlags & BBF_LOOP_ALIGN) { printf("align "); } } /***************************************************************************** * * Display the bbPreds basic block list (the block predecessors). * Returns the number of characters printed. */ unsigned BasicBlock::dspPreds() { unsigned count = 0; for (flowList* const pred : PredEdges()) { if (count != 0) { printf(","); count += 1; } printf(FMT_BB, pred->getBlock()->bbNum); count += 4; // Account for %02u only handling 2 digits, but we can display more than that. unsigned digits = CountDigits(pred->getBlock()->bbNum); if (digits > 2) { count += digits - 2; } // Does this predecessor have an interesting dup count? If so, display it. if (pred->flDupCount > 1) { printf("(%u)", pred->flDupCount); count += 2 + CountDigits(pred->flDupCount); } } return count; } /***************************************************************************** * * Display the bbCheapPreds basic block list (the block predecessors). * Returns the number of characters printed. */ unsigned BasicBlock::dspCheapPreds() { unsigned count = 0; for (BasicBlockList* pred = bbCheapPreds; pred != nullptr; pred = pred->next) { if (count != 0) { printf(","); count += 1; } printf(FMT_BB, pred->block->bbNum); count += 4; // Account for %02u only handling 2 digits, but we can display more than that. unsigned digits = CountDigits(pred->block->bbNum); if (digits > 2) { count += digits - 2; } } return count; } //------------------------------------------------------------------------ // dspSuccs: Display the basic block successors. // // Arguments: // compiler - compiler instance; passed to NumSucc(Compiler*) -- see that function for implications. 
// void BasicBlock::dspSuccs(Compiler* compiler) { bool first = true; // If this is a switch, we don't want to call `Succs(Compiler*)` because it will eventually call // `GetSwitchDescMap()`, and that will have the side-effect of allocating the unique switch descriptor map // and/or compute this switch block's unique succ set if it is not present. Debug output functions should // never have an effect on codegen. We also don't want to assume the unique succ set is accurate, so we // compute it ourselves here. if (bbJumpKind == BBJ_SWITCH) { // Create a set with all the successors. Don't use BlockSet, so we don't need to worry // about the BlockSet epoch. unsigned bbNumMax = compiler->impInlineRoot()->fgBBNumMax; BitVecTraits bitVecTraits(bbNumMax + 1, compiler); BitVec uniqueSuccBlocks(BitVecOps::MakeEmpty(&bitVecTraits)); for (BasicBlock* const bTarget : SwitchTargets()) { BitVecOps::AddElemD(&bitVecTraits, uniqueSuccBlocks, bTarget->bbNum); } BitVecOps::Iter iter(&bitVecTraits, uniqueSuccBlocks); unsigned bbNum = 0; while (iter.NextElem(&bbNum)) { // Note that we will output switch successors in increasing numerical bbNum order, which is // not related to their order in the bbJumpSwt->bbsDstTab table. printf("%s" FMT_BB, first ? "" : ",", bbNum); first = false; } } else { for (BasicBlock* const succ : Succs(compiler)) { printf("%s" FMT_BB, first ? "" : ",", succ->bbNum); first = false; } } } // Display a compact representation of the bbJumpKind, that is, where this block branches. // This is similar to code in Compiler::fgTableDispBasicBlock(), but doesn't have that code's requirements to align // things strictly. void BasicBlock::dspJumpKind() { switch (bbJumpKind) { case BBJ_EHFINALLYRET: printf(" (finret)"); break; case BBJ_EHFILTERRET: printf(" (fltret)"); break; case BBJ_EHCATCHRET: printf(" -> " FMT_BB " (cret)", bbJumpDest->bbNum); break; case BBJ_THROW: printf(" (throw)"); break; case BBJ_RETURN: printf(" (return)"); break; case BBJ_NONE: // For fall-through blocks, print nothing. break; case BBJ_ALWAYS: if (bbFlags & BBF_KEEP_BBJ_ALWAYS) { printf(" -> " FMT_BB " (ALWAYS)", bbJumpDest->bbNum); } else { printf(" -> " FMT_BB " (always)", bbJumpDest->bbNum); } break; case BBJ_LEAVE: printf(" -> " FMT_BB " (leave)", bbJumpDest->bbNum); break; case BBJ_CALLFINALLY: printf(" -> " FMT_BB " (callf)", bbJumpDest->bbNum); break; case BBJ_COND: printf(" -> " FMT_BB " (cond)", bbJumpDest->bbNum); break; case BBJ_SWITCH: { printf(" ->"); const unsigned jumpCnt = bbJumpSwt->bbsCount; BasicBlock** const jumpTab = bbJumpSwt->bbsDstTab; for (unsigned i = 0; i < jumpCnt; i++) { printf("%c" FMT_BB, (i == 0) ? 
' ' : ',', jumpTab[i]->bbNum); const bool isDefault = bbJumpSwt->bbsHasDefault && (i == jumpCnt - 1); if (isDefault) { printf("[def]"); } const bool isDominant = bbJumpSwt->bbsHasDominantCase && (i == bbJumpSwt->bbsDominantCase); if (isDominant) { printf("[dom(" FMT_WT ")]", bbJumpSwt->bbsDominantFraction); } } printf(" (switch)"); } break; default: unreached(); break; } } void BasicBlock::dspBlockHeader(Compiler* compiler, bool showKind /*= true*/, bool showFlags /*= false*/, bool showPreds /*= true*/) { printf(FMT_BB " ", bbNum); dspBlockILRange(); if (showKind) { dspJumpKind(); } if (showPreds) { printf(", preds={"); if (compiler->fgCheapPredsValid) { dspCheapPreds(); } else { dspPreds(); } printf("} succs={"); dspSuccs(compiler); printf("}"); } if (showFlags) { const unsigned lowFlags = (unsigned)bbFlags; const unsigned highFlags = (unsigned)(bbFlags >> 32); printf(" flags=0x%08x.%08x: ", highFlags, lowFlags); dspFlags(); } printf("\n"); } const char* BasicBlock::dspToString(int blockNumPadding /* = 0 */) { static char buffers[3][64]; // static array of 3 to allow 3 concurrent calls in one printf() static int nextBufferIndex = 0; auto& buffer = buffers[nextBufferIndex]; nextBufferIndex = (nextBufferIndex + 1) % ArrLen(buffers); _snprintf_s(buffer, ArrLen(buffer), ArrLen(buffer), FMT_BB "%*s [%04u]", bbNum, blockNumPadding, "", bbID); return buffer; } #endif // DEBUG // Allocation function for MemoryPhiArg. void* BasicBlock::MemoryPhiArg::operator new(size_t sz, Compiler* comp) { return comp->getAllocator(CMK_MemoryPhiArg).allocate<char>(sz); } //------------------------------------------------------------------------ // CloneBlockState: Try to populate `to` block with a copy of `from` block's statements, replacing // uses of local `varNum` with IntCns `varVal`. // // Arguments: // compiler - Jit compiler instance // to - New/empty block to copy statements into // from - Block to copy statements from // varNum - lclVar uses with lclNum `varNum` will be replaced; can be ~0 to indicate no replacement. // varVal - If replacing uses of `varNum`, replace them with int constants with value `varVal`. // // Return Value: // Cloning may fail because this routine uses `gtCloneExpr` for cloning and it can't handle all // IR nodes. If cloning of any statement fails, `false` will be returned and block `to` may be // partially populated. If cloning of all statements succeeds, `true` will be returned and // block `to` will be fully populated. bool BasicBlock::CloneBlockState( Compiler* compiler, BasicBlock* to, const BasicBlock* from, unsigned varNum, int varVal) { assert(to->bbStmtList == nullptr); to->bbFlags = from->bbFlags; to->bbWeight = from->bbWeight; BlockSetOps::AssignAllowUninitRhs(compiler, to->bbReach, from->bbReach); to->copyEHRegion(from); to->bbCatchTyp = from->bbCatchTyp; to->bbRefs = from->bbRefs; to->bbStkTempsIn = from->bbStkTempsIn; to->bbStkTempsOut = from->bbStkTempsOut; to->bbStkDepth = from->bbStkDepth; to->bbCodeOffs = from->bbCodeOffs; to->bbCodeOffsEnd = from->bbCodeOffsEnd; VarSetOps::AssignAllowUninitRhs(compiler, to->bbScope, from->bbScope); to->bbNatLoopNum = from->bbNatLoopNum; #ifdef DEBUG to->bbTgtStkDepth = from->bbTgtStkDepth; #endif // DEBUG for (Statement* const fromStmt : from->Statements()) { GenTree* newExpr = compiler->gtCloneExpr(fromStmt->GetRootNode(), GTF_EMPTY, varNum, varVal); if (!newExpr) { // gtCloneExpr doesn't handle all opcodes, so may fail to clone a statement. 
// When that happens, it returns nullptr; abandon the rest of this block and // return `false` to the caller to indicate that cloning was unsuccessful. return false; } compiler->fgInsertStmtAtEnd(to, compiler->fgNewStmtFromTree(newExpr, fromStmt->GetDebugInfo())); } return true; } // LIR helpers void BasicBlock::MakeLIR(GenTree* firstNode, GenTree* lastNode) { assert(!IsLIR()); assert((firstNode == nullptr) == (lastNode == nullptr)); assert((firstNode == lastNode) || firstNode->Precedes(lastNode)); m_firstNode = firstNode; m_lastNode = lastNode; bbFlags |= BBF_IS_LIR; } bool BasicBlock::IsLIR() const { assert(isValid()); const bool isLIR = ((bbFlags & BBF_IS_LIR) != 0); return isLIR; } //------------------------------------------------------------------------ // firstStmt: Returns the first statement in the block // // Arguments: // None. // // Return Value: // The first statement in the block's bbStmtList. // Statement* BasicBlock::firstStmt() const { return bbStmtList; } //------------------------------------------------------------------------ // lastStmt: Returns the last statement in the block // // Arguments: // None. // // Return Value: // The last statement in the block's bbStmtList. // Statement* BasicBlock::lastStmt() const { if (bbStmtList == nullptr) { return nullptr; } Statement* result = bbStmtList->GetPrevStmt(); assert(result != nullptr && result->GetNextStmt() == nullptr); return result; } //------------------------------------------------------------------------ // BasicBlock::lastNode: Returns the last node in the block. // GenTree* BasicBlock::lastNode() const { return IsLIR() ? m_lastNode : lastStmt()->GetRootNode(); } //------------------------------------------------------------------------ // GetUniquePred: Returns the unique predecessor of a block, if one exists. // The predecessor lists must be accurate. // // Arguments: // None. // // Return Value: // The unique predecessor of a block, or nullptr if there is no unique predecessor. // // Notes: // If the first block has a predecessor (which it may have, if it is the target of // a backedge), we never want to consider it "unique" because the prolog is an // implicit predecessor. BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler) const { if ((bbPreds == nullptr) || (bbPreds->flNext != nullptr) || (this == compiler->fgFirstBB)) { return nullptr; } else { return bbPreds->getBlock(); } } //------------------------------------------------------------------------ // GetUniqueSucc: Returns the unique successor of a block, if one exists. // Only considers BBJ_ALWAYS and BBJ_NONE block types. // // Arguments: // None. // // Return Value: // The unique successor of a block, or nullptr if there is no unique successor. BasicBlock* BasicBlock::GetUniqueSucc() const { if (bbJumpKind == BBJ_ALWAYS) { return bbJumpDest; } else if (bbJumpKind == BBJ_NONE) { return bbNext; } else { return nullptr; } } // Static vars. BasicBlock::MemoryPhiArg* BasicBlock::EmptyMemoryPhiDef = (BasicBlock::MemoryPhiArg*)0x1; unsigned JitPtrKeyFuncs<BasicBlock>::GetHashCode(const BasicBlock* ptr) { #ifdef DEBUG unsigned hash = SsaStressHashHelper(); if (hash != 0) { return (hash ^ (ptr->bbNum << 16) ^ ptr->bbNum); } #endif return ptr->bbNum; } //------------------------------------------------------------------------ // isEmpty: check if block is empty or contains only ignorable statements // // Return Value: // True if block is empty, or contains only PHI assignments, // or contains zero or more PHI assignments followed by NOPs. 
// bool BasicBlock::isEmpty() const { if (!IsLIR()) { for (Statement* const stmt : NonPhiStatements()) { if (!stmt->GetRootNode()->OperIs(GT_NOP)) { return false; } } } else { for (GenTree* node : LIR::AsRange(this)) { if (node->OperGet() != GT_IL_OFFSET) { return false; } } } return true; } //------------------------------------------------------------------------ // isValid: Checks that the basic block doesn't mix statements and LIR lists. // // Return Value: // True if it a valid basic block. // bool BasicBlock::isValid() const { const bool isLIR = ((bbFlags & BBF_IS_LIR) != 0); if (isLIR) { // Should not have statements in LIR. return (bbStmtList == nullptr); } else { // Should not have tree list before LIR. return (GetFirstLIRNode() == nullptr); } } Statement* BasicBlock::FirstNonPhiDef() const { Statement* stmt = firstStmt(); if (stmt == nullptr) { return nullptr; } GenTree* tree = stmt->GetRootNode(); while ((tree->OperGet() == GT_ASG && tree->AsOp()->gtOp2->OperGet() == GT_PHI) || (tree->OperGet() == GT_STORE_LCL_VAR && tree->AsOp()->gtOp1->OperGet() == GT_PHI)) { stmt = stmt->GetNextStmt(); if (stmt == nullptr) { return nullptr; } tree = stmt->GetRootNode(); } return stmt; } Statement* BasicBlock::FirstNonPhiDefOrCatchArgAsg() const { Statement* stmt = FirstNonPhiDef(); if (stmt == nullptr) { return nullptr; } GenTree* tree = stmt->GetRootNode(); if ((tree->OperGet() == GT_ASG && tree->AsOp()->gtOp2->OperGet() == GT_CATCH_ARG) || (tree->OperGet() == GT_STORE_LCL_VAR && tree->AsOp()->gtOp1->OperGet() == GT_CATCH_ARG)) { stmt = stmt->GetNextStmt(); } return stmt; } /***************************************************************************** * * Can a BasicBlock be inserted after this without altering the flowgraph */ bool BasicBlock::bbFallsThrough() const { switch (bbJumpKind) { case BBJ_THROW: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_EHCATCHRET: case BBJ_RETURN: case BBJ_ALWAYS: case BBJ_LEAVE: case BBJ_SWITCH: return false; case BBJ_NONE: case BBJ_COND: return true; case BBJ_CALLFINALLY: return ((bbFlags & BBF_RETLESS_CALL) == 0); default: assert(!"Unknown bbJumpKind in bbFallsThrough()"); return true; } } //------------------------------------------------------------------------ // NumSucc: Returns the count of block successors. See the declaration comment for details. // // Arguments: // None. // // Return Value: // Count of block successors. // unsigned BasicBlock::NumSucc() const { switch (bbJumpKind) { case BBJ_THROW: case BBJ_RETURN: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: return 0; case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_LEAVE: case BBJ_NONE: return 1; case BBJ_COND: if (bbJumpDest == bbNext) { return 1; } else { return 2; } case BBJ_SWITCH: return bbJumpSwt->bbsCount; default: unreached(); } } //------------------------------------------------------------------------ // GetSucc: Returns the requested block successor. See the declaration comment for details. // // Arguments: // i - index of successor to return. 0 <= i <= NumSucc(). // // Return Value: // Requested successor block // BasicBlock* BasicBlock::GetSucc(unsigned i) const { assert(i < NumSucc()); // Index bounds check. 
switch (bbJumpKind) { case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_LEAVE: return bbJumpDest; case BBJ_NONE: return bbNext; case BBJ_COND: if (i == 0) { return bbNext; } else { assert(i == 1); return bbJumpDest; } case BBJ_SWITCH: return bbJumpSwt->bbsDstTab[i]; default: unreached(); } } //------------------------------------------------------------------------ // NumSucc: Returns the count of block successors. See the declaration comment for details. // // Arguments: // comp - Compiler instance // // Return Value: // Count of block successors. // unsigned BasicBlock::NumSucc(Compiler* comp) { assert(comp != nullptr); switch (bbJumpKind) { case BBJ_THROW: case BBJ_RETURN: return 0; case BBJ_EHFINALLYRET: { // The first block of the handler is labelled with the catch type. BasicBlock* hndBeg = comp->fgFirstBlockOfHandler(this); if (hndBeg->bbCatchTyp == BBCT_FINALLY) { return comp->fgNSuccsOfFinallyRet(this); } else { assert(hndBeg->bbCatchTyp == BBCT_FAULT); // We can only BBJ_EHFINALLYRET from FINALLY and FAULT. // A FAULT block has no successors. return 0; } } case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: case BBJ_NONE: return 1; case BBJ_COND: if (bbJumpDest == bbNext) { return 1; } else { return 2; } case BBJ_SWITCH: { Compiler::SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(this); return sd.numDistinctSuccs; } default: unreached(); } } //------------------------------------------------------------------------ // GetSucc: Returns the requested block successor. See the declaration comment for details. // // Arguments: // i - index of successor to return. 0 <= i <= NumSucc(comp). // comp - Compiler instance // // Return Value: // Requested successor block // BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp) { assert(comp != nullptr); assert(i < NumSucc(comp)); // Index bounds check. switch (bbJumpKind) { case BBJ_EHFILTERRET: { // Handler is the (sole) normal successor of the filter. assert(comp->fgFirstBlockOfHandler(this) == bbJumpDest); return bbJumpDest; } case BBJ_EHFINALLYRET: // Note: the following call is expensive. return comp->fgSuccOfFinallyRet(this, i); case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_LEAVE: return bbJumpDest; case BBJ_NONE: return bbNext; case BBJ_COND: if (i == 0) { return bbNext; } else { assert(i == 1); return bbJumpDest; } case BBJ_SWITCH: { Compiler::SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(this); assert(i < sd.numDistinctSuccs); // Range check. 
return sd.nonDuplicates[i]; } default: unreached(); } } void BasicBlock::InitVarSets(Compiler* comp) { VarSetOps::AssignNoCopy(comp, bbVarUse, VarSetOps::MakeEmpty(comp)); VarSetOps::AssignNoCopy(comp, bbVarDef, VarSetOps::MakeEmpty(comp)); VarSetOps::AssignNoCopy(comp, bbLiveIn, VarSetOps::MakeEmpty(comp)); VarSetOps::AssignNoCopy(comp, bbLiveOut, VarSetOps::MakeEmpty(comp)); VarSetOps::AssignNoCopy(comp, bbScope, VarSetOps::MakeEmpty(comp)); bbMemoryUse = emptyMemoryKindSet; bbMemoryDef = emptyMemoryKindSet; bbMemoryLiveIn = emptyMemoryKindSet; bbMemoryLiveOut = emptyMemoryKindSet; } // Returns true if the basic block ends with GT_JMP bool BasicBlock::endsWithJmpMethod(Compiler* comp) const { if (comp->compJmpOpUsed && (bbJumpKind == BBJ_RETURN) && (bbFlags & BBF_HAS_JMP)) { GenTree* lastNode = this->lastNode(); assert(lastNode != nullptr); return lastNode->OperGet() == GT_JMP; } return false; } // Returns true if the basic block ends with either // i) GT_JMP or // ii) tail call (implicit or explicit) // // Params: // comp - Compiler instance // fastTailCallsOnly - Only consider fast tail calls excluding tail calls via helper. // bool BasicBlock::endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly /*=false*/) const { GenTree* tailCall = nullptr; bool tailCallsConvertibleToLoopOnly = false; return endsWithJmpMethod(comp) || endsWithTailCall(comp, fastTailCallsOnly, tailCallsConvertibleToLoopOnly, &tailCall); } //------------------------------------------------------------------------------ // endsWithTailCall : Check if the block ends with a tail call. // // Arguments: // comp - compiler instance // fastTailCallsOnly - check for fast tail calls only // tailCallsConvertibleToLoopOnly - check for tail calls convertible to loop only // tailCall - a pointer to a tree that will be set to the call tree if the block // ends with a tail call and will be set to nullptr otherwise. // // Return Value: // true if the block ends with a tail call; false otherwise. // // Notes: // At most one of fastTailCallsOnly and tailCallsConvertibleToLoopOnly flags can be true. // bool BasicBlock::endsWithTailCall(Compiler* comp, bool fastTailCallsOnly, bool tailCallsConvertibleToLoopOnly, GenTree** tailCall) const { assert(!fastTailCallsOnly || !tailCallsConvertibleToLoopOnly); *tailCall = nullptr; bool result = false; // Is this a tail call? // The reason for keeping this under RyuJIT is so as not to impact existing Jit32 x86 and arm // targets. if (comp->compTailCallUsed) { if (fastTailCallsOnly || tailCallsConvertibleToLoopOnly) { // Only fast tail calls or only tail calls convertible to loops result = (bbFlags & BBF_HAS_JMP) && (bbJumpKind == BBJ_RETURN); } else { // Fast tail calls, tail calls convertible to loops, and tails calls dispatched via helper result = (bbJumpKind == BBJ_THROW) || ((bbFlags & BBF_HAS_JMP) && (bbJumpKind == BBJ_RETURN)); } if (result) { GenTree* lastNode = this->lastNode(); if (lastNode->OperGet() == GT_CALL) { GenTreeCall* call = lastNode->AsCall(); if (tailCallsConvertibleToLoopOnly) { result = call->IsTailCallConvertibleToLoop(); } else if (fastTailCallsOnly) { result = call->IsFastTailCall(); } else { result = call->IsTailCall(); } if (result) { *tailCall = call; } } else { result = false; } } } return result; } //------------------------------------------------------------------------------ // endsWithTailCallConvertibleToLoop : Check if the block ends with a tail call convertible to loop. 
// // Arguments: // comp - compiler instance // tailCall - a pointer to a tree that will be set to the call tree if the block // ends with a tail call convertible to loop and will be set to nullptr otherwise. // // Return Value: // true if the block ends with a tail call convertible to loop. // bool BasicBlock::endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall) const { bool fastTailCallsOnly = false; bool tailCallsConvertibleToLoopOnly = true; return endsWithTailCall(comp, fastTailCallsOnly, tailCallsConvertibleToLoopOnly, tailCall); } /***************************************************************************** * * Allocate a basic block but don't append it to the current BB list. */ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind) { BasicBlock* block; /* Allocate the block descriptor and zero it out */ assert(fgSafeBasicBlockCreation); block = new (this, CMK_BasicBlock) BasicBlock; #if MEASURE_BLOCK_SIZE BasicBlock::s_Count += 1; BasicBlock::s_Size += sizeof(*block); #endif #ifdef DEBUG // fgLookupBB() is invalid until fgInitBBLookup() is called again. fgBBs = (BasicBlock**)0xCDCD; #endif // TODO-Throughput: The following memset is pretty expensive - do something else? // Note that some fields have to be initialized to 0 (like bbFPStateX87) memset(block, 0, sizeof(*block)); // scopeInfo needs to be able to differentiate between blocks which // correspond to some instrs (and so may have some LocalVarInfo // boundaries), or have been inserted by the JIT block->bbCodeOffs = BAD_IL_OFFSET; block->bbCodeOffsEnd = BAD_IL_OFFSET; #ifdef DEBUG block->bbID = compBasicBlockID++; #endif /* Give the block a number, set the ancestor count and weight */ ++fgBBcount; ++fgBBNumMax; if (compIsForInlining()) { block->bbNum = ++impInlineInfo->InlinerCompiler->fgBBNumMax; } else { block->bbNum = fgBBNumMax; } if (compRationalIRForm) { block->bbFlags |= BBF_IS_LIR; } block->bbRefs = 1; block->bbWeight = BB_UNITY_WEIGHT; block->bbStkTempsIn = NO_BASE_TMP; block->bbStkTempsOut = NO_BASE_TMP; block->bbEntryState = nullptr; /* Record the jump kind in the block */ block->bbJumpKind = jumpKind; if (jumpKind == BBJ_THROW) { block->bbSetRunRarely(); } #ifdef DEBUG if (verbose) { printf("New Basic Block %s created.\n", block->dspToString()); } #endif // We will give all the blocks var sets after the number of tracked variables // is determined and frozen. After that, if we dynamically create a basic block, // we will initialize its var sets. 
if (fgBBVarSetsInited) { VarSetOps::AssignNoCopy(this, block->bbVarUse, VarSetOps::MakeEmpty(this)); VarSetOps::AssignNoCopy(this, block->bbVarDef, VarSetOps::MakeEmpty(this)); VarSetOps::AssignNoCopy(this, block->bbLiveIn, VarSetOps::MakeEmpty(this)); VarSetOps::AssignNoCopy(this, block->bbLiveOut, VarSetOps::MakeEmpty(this)); VarSetOps::AssignNoCopy(this, block->bbScope, VarSetOps::MakeEmpty(this)); } else { VarSetOps::AssignNoCopy(this, block->bbVarUse, VarSetOps::UninitVal()); VarSetOps::AssignNoCopy(this, block->bbVarDef, VarSetOps::UninitVal()); VarSetOps::AssignNoCopy(this, block->bbLiveIn, VarSetOps::UninitVal()); VarSetOps::AssignNoCopy(this, block->bbLiveOut, VarSetOps::UninitVal()); VarSetOps::AssignNoCopy(this, block->bbScope, VarSetOps::UninitVal()); } block->bbMemoryUse = emptyMemoryKindSet; block->bbMemoryDef = emptyMemoryKindSet; block->bbMemoryLiveIn = emptyMemoryKindSet; block->bbMemoryLiveOut = emptyMemoryKindSet; for (MemoryKind memoryKind : allMemoryKinds()) { block->bbMemorySsaPhiFunc[memoryKind] = nullptr; block->bbMemorySsaNumIn[memoryKind] = 0; block->bbMemorySsaNumOut[memoryKind] = 0; } // Make sure we reserve a NOT_IN_LOOP value that isn't a legal table index. static_assert_no_msg(BasicBlock::MAX_LOOP_NUM < BasicBlock::NOT_IN_LOOP); block->bbNatLoopNum = BasicBlock::NOT_IN_LOOP; return block; } //------------------------------------------------------------------------ // isBBCallAlwaysPair: Determine if this is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair // // Return Value: // True iff "this" is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair // -- a block corresponding to an exit from the try of a try/finally. // // Notes: // In the flow graph, this becomes a block that calls the finally, and a second, immediately // following empty block (in the bbNext chain) to which the finally will return, and which // branches unconditionally to the next block to be executed outside the try/finally. // Note that code is often generated differently than this description. For example, on ARM, // the target of the BBJ_ALWAYS is loaded in LR (the return register), and a direct jump is // made to the 'finally'. The effect is that the 'finally' returns directly to the target of // the BBJ_ALWAYS. A "retless" BBJ_CALLFINALLY is one that has no corresponding BBJ_ALWAYS. // This can happen if the finally is known to not return (e.g., it contains a 'throw'). In // that case, the BBJ_CALLFINALLY flags has BBF_RETLESS_CALL set. Note that ARM never has // "retless" BBJ_CALLFINALLY blocks due to a requirement to use the BBJ_ALWAYS for // generating code. // bool BasicBlock::isBBCallAlwaysPair() const { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (this->bbJumpKind == BBJ_CALLFINALLY) #else if ((this->bbJumpKind == BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) #endif { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // On ARM, there are no retless BBJ_CALLFINALLY. assert(!(this->bbFlags & BBF_RETLESS_CALL)); #endif // Some asserts that the next block is a BBJ_ALWAYS of the proper form. 
assert(this->bbNext != nullptr); assert(this->bbNext->bbJumpKind == BBJ_ALWAYS); assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS); assert(this->bbNext->isEmpty()); return true; } else { return false; } } //------------------------------------------------------------------------ // isBBCallAlwaysPairTail: Determine if this is the last block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair // // Return Value: // True iff "this" is the last block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair // -- a block corresponding to an exit from the try of a try/finally. // // Notes: // See notes on isBBCallAlwaysPair(), above. // bool BasicBlock::isBBCallAlwaysPairTail() const { return (bbPrev != nullptr) && bbPrev->isBBCallAlwaysPair(); } //------------------------------------------------------------------------ // hasEHBoundaryIn: Determine if this block begins at an EH boundary. // // Return Value: // True iff the block is the target of an EH edge; false otherwise. // // Notes: // For the purposes of this method (and its callers), an EH edge is one on // which the EH flow model requires that all lclVars must be reloaded from // the stack before use, since control flow may transfer to this block through // control flow that is not reflected in the flowgraph. // Note that having a predecessor in a different EH region doesn't require // that lclVars must be reloaded from the stack. That's only required when // this block might be entered via flow that is not represented by an edge // in the flowgraph. // bool BasicBlock::hasEHBoundaryIn() const { bool returnVal = (bbCatchTyp != BBCT_NONE); if (!returnVal) { #if FEATURE_EH_FUNCLETS assert((bbFlags & BBF_FUNCLET_BEG) == 0); #endif // FEATURE_EH_FUNCLETS } return returnVal; } //------------------------------------------------------------------------ // hasEHBoundaryOut: Determine if this block ends in an EH boundary. // // Return Value: // True iff the block ends in an exception boundary that requires that no lclVars // are live in registers; false otherwise. // // Notes: // We may have a successor in a different EH region, but it is OK to have lclVars // live in registers if any successor is a normal flow edge. That's because the // EH write-thru semantics ensure that we always have an up-to-date value on the stack. // bool BasicBlock::hasEHBoundaryOut() const { bool returnVal = false; if (bbJumpKind == BBJ_EHFILTERRET) { returnVal = true; } if (bbJumpKind == BBJ_EHFINALLYRET) { returnVal = true; } #if FEATURE_EH_FUNCLETS if (bbJumpKind == BBJ_EHCATCHRET) { returnVal = true; } #endif // FEATURE_EH_FUNCLETS return returnVal; } //------------------------------------------------------------------------ // BBswtDesc copy ctor: copy a switch descriptor // // Arguments: // comp - compiler instance // other - existing switch descriptor to copy // BBswtDesc::BBswtDesc(Compiler* comp, const BBswtDesc* other) : bbsDstTab(nullptr) , bbsCount(other->bbsCount) , bbsDominantCase(other->bbsDominantCase) , bbsDominantFraction(other->bbsDominantFraction) , bbsHasDefault(other->bbsHasDefault) , bbsHasDominantCase(other->bbsHasDominantCase) { // Allocate and fill in a new dst tab // bbsDstTab = new (comp, CMK_BasicBlock) BasicBlock*[bbsCount]; for (unsigned i = 0; i < bbsCount; i++) { bbsDstTab[i] = other->bbsDstTab[i]; } } //------------------------------------------------------------------------ // unmarkLoopAlign: Unmarks the LOOP_ALIGN flag from the block and reduce the // loop alignment count. 
// // Arguments: // compiler - Compiler instance // reason - Reason to print in JITDUMP // void BasicBlock::unmarkLoopAlign(Compiler* compiler DEBUG_ARG(const char* reason)) { // Make sure we unmark and count just once. if (isLoopAlign()) { compiler->loopAlignCandidates--; bbFlags &= ~BBF_LOOP_ALIGN; JITDUMP("Unmarking LOOP_ALIGN from " FMT_BB ". Reason= %s.\n", bbNum, reason); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX BasicBlock XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "jitstd/algorithm.h" #if MEASURE_BLOCK_SIZE /* static */ size_t BasicBlock::s_Size; /* static */ size_t BasicBlock::s_Count; #endif // MEASURE_BLOCK_SIZE #ifdef DEBUG // The max # of tree nodes in any BB /* static */ unsigned BasicBlock::s_nMaxTrees; #endif // DEBUG #ifdef DEBUG flowList* ShuffleHelper(unsigned hash, flowList* res) { flowList* head = res; for (flowList *prev = nullptr; res != nullptr; prev = res, res = res->flNext) { unsigned blkHash = (hash ^ (res->getBlock()->bbNum << 16) ^ res->getBlock()->bbNum); if (((blkHash % 1879) & 1) && prev != nullptr) { // Swap res with head. prev->flNext = head; std::swap(head->flNext, res->flNext); std::swap(head, res); } } return head; } unsigned SsaStressHashHelper() { // hash = 0: turned off, hash = 1: use method hash, hash = *: use custom hash. unsigned hash = JitConfig.JitSsaStress(); if (hash == 0) { return hash; } if (hash == 1) { return JitTls::GetCompiler()->info.compMethodHash(); } return ((hash >> 16) == 0) ? ((hash << 16) | hash) : hash; } #endif EHSuccessorIterPosition::EHSuccessorIterPosition(Compiler* comp, BasicBlock* block) : m_remainingRegSuccs(block->NumSucc(comp)), m_curRegSucc(nullptr), m_curTry(comp->ehGetBlockExnFlowDsc(block)) { // If "block" is a "leave helper" block (the empty BBJ_ALWAYS block that pairs with a // preceding BBJ_CALLFINALLY block to implement a "leave" IL instruction), then no exceptions // can occur within it, so clear m_curTry if it's non-null. if (m_curTry != nullptr) { if (block->isBBCallAlwaysPairTail()) { m_curTry = nullptr; } } if (m_curTry == nullptr && m_remainingRegSuccs > 0) { // Examine the successors to see if any are the start of try blocks. FindNextRegSuccTry(comp, block); } } void EHSuccessorIterPosition::FindNextRegSuccTry(Compiler* comp, BasicBlock* block) { assert(m_curTry == nullptr); // Must now consider the next regular successor, if any. while (m_remainingRegSuccs > 0) { m_remainingRegSuccs--; m_curRegSucc = block->GetSucc(m_remainingRegSuccs, comp); if (comp->bbIsTryBeg(m_curRegSucc)) { assert(m_curRegSucc->hasTryIndex()); // Since it is a try begin. unsigned newTryIndex = m_curRegSucc->getTryIndex(); // If the try region started by "m_curRegSucc" (represented by newTryIndex) contains m_block, // we've already yielded its handler, as one of the EH handler successors of m_block itself. if (comp->bbInExnFlowRegions(newTryIndex, block)) { continue; } // Otherwise, consider this try. m_curTry = comp->ehGetDsc(newTryIndex); break; } } } void EHSuccessorIterPosition::Advance(Compiler* comp, BasicBlock* block) { assert(m_curTry != nullptr); if (m_curTry->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) { m_curTry = comp->ehGetDsc(m_curTry->ebdEnclosingTryIndex); // If we've gone over into considering try's containing successors, // then the enclosing try must have the successor as its first block. 
if (m_curRegSucc == nullptr || m_curTry->ebdTryBeg == m_curRegSucc) { return; } // Otherwise, give up, try the next regular successor. m_curTry = nullptr; } else { m_curTry = nullptr; } // We've exhausted all try blocks. // See if there are any remaining regular successors that start try blocks. FindNextRegSuccTry(comp, block); } BasicBlock* EHSuccessorIterPosition::Current(Compiler* comp, BasicBlock* block) { assert(m_curTry != nullptr); return m_curTry->ExFlowBlock(); } flowList* Compiler::BlockPredsWithEH(BasicBlock* blk) { BlockToFlowListMap* ehPreds = GetBlockToEHPreds(); flowList* res; if (ehPreds->Lookup(blk, &res)) { return res; } res = blk->bbPreds; unsigned tryIndex; if (bbIsExFlowBlock(blk, &tryIndex)) { // Find the first block of the try. EHblkDsc* ehblk = ehGetDsc(tryIndex); BasicBlock* tryStart = ehblk->ebdTryBeg; for (BasicBlock* const tryStartPredBlock : tryStart->PredBlocks()) { res = new (this, CMK_FlowList) flowList(tryStartPredBlock, res); #if MEASURE_BLOCK_SIZE genFlowNodeCnt += 1; genFlowNodeSize += sizeof(flowList); #endif // MEASURE_BLOCK_SIZE } // Now add all blocks handled by this handler (except for second blocks of BBJ_CALLFINALLY/BBJ_ALWAYS pairs; // these cannot cause transfer to the handler...) // TODO-Throughput: It would be nice if we could iterate just over the blocks in the try, via // something like: // for (BasicBlock* bb = ehblk->ebdTryBeg; bb != ehblk->ebdTryLast->bbNext; bb = bb->bbNext) // (plus adding in any filter blocks outside the try whose exceptions are handled here). // That doesn't work, however: funclets have caused us to sometimes split the body of a try into // more than one sequence of contiguous blocks. We need to find a better way to do this. for (BasicBlock* const bb : Blocks()) { if (bbInExnFlowRegions(tryIndex, bb) && !bb->isBBCallAlwaysPairTail()) { res = new (this, CMK_FlowList) flowList(bb, res); #if MEASURE_BLOCK_SIZE genFlowNodeCnt += 1; genFlowNodeSize += sizeof(flowList); #endif // MEASURE_BLOCK_SIZE } } #ifdef DEBUG unsigned hash = SsaStressHashHelper(); if (hash != 0) { res = ShuffleHelper(hash, res); } #endif // DEBUG ehPreds->Set(blk, res); } return res; } //------------------------------------------------------------------------ // checkPredListOrder: see if pred list is properly ordered // // Returns: // false if pred list is not in increasing bbNum order. // bool BasicBlock::checkPredListOrder() { unsigned lastBBNum = 0; for (BasicBlock* const predBlock : PredBlocks()) { const unsigned bbNum = predBlock->bbNum; if (bbNum <= lastBBNum) { assert(bbNum != lastBBNum); return false; } lastBBNum = bbNum; } return true; } //------------------------------------------------------------------------ // ensurePredListOrder: ensure all pred list entries appear in increasing // bbNum order. // // Arguments: // compiler - current compiler instance // void BasicBlock::ensurePredListOrder(Compiler* compiler) { // First, check if list is already in order. // if (checkPredListOrder()) { return; } reorderPredList(compiler); assert(checkPredListOrder()); } //------------------------------------------------------------------------ // reorderPredList: relink pred list in increasing bbNum order. // // Arguments: // compiler - current compiler instance // void BasicBlock::reorderPredList(Compiler* compiler) { // Count number or entries. // int count = 0; for (flowList* const pred : PredEdges()) { count++; } // If only 0 or 1 entry, nothing to reorder. // if (count < 2) { return; } // Allocate sort vector if needed. 
// if (compiler->fgPredListSortVector == nullptr) { CompAllocator allocator = compiler->getAllocator(CMK_FlowList); compiler->fgPredListSortVector = new (allocator) jitstd::vector<flowList*>(allocator); } jitstd::vector<flowList*>* const sortVector = compiler->fgPredListSortVector; sortVector->clear(); // Fill in the vector from the list. // for (flowList* const pred : PredEdges()) { sortVector->push_back(pred); } // Sort by increasing bbNum // struct flowListBBNumCmp { bool operator()(const flowList* f1, const flowList* f2) { return f1->getBlock()->bbNum < f2->getBlock()->bbNum; } }; jitstd::sort(sortVector->begin(), sortVector->end(), flowListBBNumCmp()); // Rethread the list. // flowList* last = nullptr; for (flowList* current : *sortVector) { if (last == nullptr) { bbPreds = current; } else { last->flNext = current; } last = current; } last->flNext = nullptr; // Note this lastPred is only used transiently. // bbLastPred = last; } #ifdef DEBUG //------------------------------------------------------------------------ // dspBlockILRange(): Display the block's IL range as [XXX...YYY), where XXX and YYY might be "???" for BAD_IL_OFFSET. // void BasicBlock::dspBlockILRange() const { if (bbCodeOffs != BAD_IL_OFFSET) { printf("[%03X..", bbCodeOffs); } else { printf("[???" ".."); } if (bbCodeOffsEnd != BAD_IL_OFFSET) { // brace-matching editor workaround for following line: ( printf("%03X)", bbCodeOffsEnd); } else { // brace-matching editor workaround for following line: ( printf("???" ")"); } } //------------------------------------------------------------------------ // dspFlags: Print out the block's flags // void BasicBlock::dspFlags() { if (bbFlags & BBF_VISITED) { printf("v "); } if (bbFlags & BBF_MARKED) { printf("m "); } if (bbFlags & BBF_CHANGED) { printf("! 
"); } if (bbFlags & BBF_REMOVED) { printf("del "); } if (bbFlags & BBF_DONT_REMOVE) { printf("keep "); } if (bbFlags & BBF_IMPORTED) { printf("i "); } if (bbFlags & BBF_INTERNAL) { printf("internal "); } if (bbFlags & BBF_FAILED_VERIFICATION) { printf("failV "); } if (bbFlags & BBF_TRY_BEG) { printf("try "); } if (bbFlags & BBF_RUN_RARELY) { printf("rare "); } if (bbFlags & BBF_LOOP_HEAD) { printf("Loop "); } if (bbFlags & BBF_LOOP_CALL0) { printf("Loop0 "); } if (bbFlags & BBF_LOOP_CALL1) { printf("Loop1 "); } if (bbFlags & BBF_HAS_LABEL) { printf("label "); } if (bbFlags & BBF_HAS_JMP) { printf("jmp "); } if (bbFlags & BBF_HAS_CALL) { printf("hascall "); } if (bbFlags & BBF_GC_SAFE_POINT) { printf("gcsafe "); } if (bbFlags & BBF_FUNCLET_BEG) { printf("flet "); } if (bbFlags & BBF_HAS_IDX_LEN) { printf("idxlen "); } if (bbFlags & BBF_HAS_NEWARRAY) { printf("new[] "); } if (bbFlags & BBF_HAS_NEWOBJ) { printf("newobj "); } if (bbFlags & BBF_HAS_NULLCHECK) { printf("nullcheck "); } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (bbFlags & BBF_FINALLY_TARGET) { printf("ftarget "); } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (bbFlags & BBF_BACKWARD_JUMP) { printf("bwd "); } if (bbFlags & BBF_BACKWARD_JUMP_TARGET) { printf("bwd-target "); } if (bbFlags & BBF_BACKWARD_JUMP_SOURCE) { printf("bwd-src "); } if (bbFlags & BBF_PATCHPOINT) { printf("ppoint "); } if (bbFlags & BBF_PARTIAL_COMPILATION_PATCHPOINT) { printf("pc-ppoint "); } if (bbFlags & BBF_RETLESS_CALL) { printf("retless "); } if (bbFlags & BBF_LOOP_PREHEADER) { printf("LoopPH "); } if (bbFlags & BBF_COLD) { printf("cold "); } if (bbFlags & BBF_PROF_WEIGHT) { printf("IBC "); } if (bbFlags & BBF_IS_LIR) { printf("LIR "); } if (bbFlags & BBF_KEEP_BBJ_ALWAYS) { printf("KEEP "); } if (bbFlags & BBF_CLONED_FINALLY_BEGIN) { printf("cfb "); } if (bbFlags & BBF_CLONED_FINALLY_END) { printf("cfe "); } if (bbFlags & BBF_LOOP_ALIGN) { printf("align "); } } /***************************************************************************** * * Display the bbPreds basic block list (the block predecessors). * Returns the number of characters printed. */ unsigned BasicBlock::dspPreds() { unsigned count = 0; for (flowList* const pred : PredEdges()) { if (count != 0) { printf(","); count += 1; } printf(FMT_BB, pred->getBlock()->bbNum); count += 4; // Account for %02u only handling 2 digits, but we can display more than that. unsigned digits = CountDigits(pred->getBlock()->bbNum); if (digits > 2) { count += digits - 2; } // Does this predecessor have an interesting dup count? If so, display it. if (pred->flDupCount > 1) { printf("(%u)", pred->flDupCount); count += 2 + CountDigits(pred->flDupCount); } } return count; } /***************************************************************************** * * Display the bbCheapPreds basic block list (the block predecessors). * Returns the number of characters printed. */ unsigned BasicBlock::dspCheapPreds() { unsigned count = 0; for (BasicBlockList* pred = bbCheapPreds; pred != nullptr; pred = pred->next) { if (count != 0) { printf(","); count += 1; } printf(FMT_BB, pred->block->bbNum); count += 4; // Account for %02u only handling 2 digits, but we can display more than that. unsigned digits = CountDigits(pred->block->bbNum); if (digits > 2) { count += digits - 2; } } return count; } //------------------------------------------------------------------------ // dspSuccs: Display the basic block successors. 
// // Arguments: // compiler - compiler instance; passed to NumSucc(Compiler*) -- see that function for implications. // void BasicBlock::dspSuccs(Compiler* compiler) { bool first = true; // If this is a switch, we don't want to call `Succs(Compiler*)` because it will eventually call // `GetSwitchDescMap()`, and that will have the side-effect of allocating the unique switch descriptor map // and/or compute this switch block's unique succ set if it is not present. Debug output functions should // never have an effect on codegen. We also don't want to assume the unique succ set is accurate, so we // compute it ourselves here. if (bbJumpKind == BBJ_SWITCH) { // Create a set with all the successors. Don't use BlockSet, so we don't need to worry // about the BlockSet epoch. unsigned bbNumMax = compiler->impInlineRoot()->fgBBNumMax; BitVecTraits bitVecTraits(bbNumMax + 1, compiler); BitVec uniqueSuccBlocks(BitVecOps::MakeEmpty(&bitVecTraits)); for (BasicBlock* const bTarget : SwitchTargets()) { BitVecOps::AddElemD(&bitVecTraits, uniqueSuccBlocks, bTarget->bbNum); } BitVecOps::Iter iter(&bitVecTraits, uniqueSuccBlocks); unsigned bbNum = 0; while (iter.NextElem(&bbNum)) { // Note that we will output switch successors in increasing numerical bbNum order, which is // not related to their order in the bbJumpSwt->bbsDstTab table. printf("%s" FMT_BB, first ? "" : ",", bbNum); first = false; } } else { for (BasicBlock* const succ : Succs(compiler)) { printf("%s" FMT_BB, first ? "" : ",", succ->bbNum); first = false; } } } // Display a compact representation of the bbJumpKind, that is, where this block branches. // This is similar to code in Compiler::fgTableDispBasicBlock(), but doesn't have that code's requirements to align // things strictly. void BasicBlock::dspJumpKind() { switch (bbJumpKind) { case BBJ_EHFINALLYRET: printf(" (finret)"); break; case BBJ_EHFILTERRET: printf(" (fltret)"); break; case BBJ_EHCATCHRET: printf(" -> " FMT_BB " (cret)", bbJumpDest->bbNum); break; case BBJ_THROW: printf(" (throw)"); break; case BBJ_RETURN: printf(" (return)"); break; case BBJ_NONE: // For fall-through blocks, print nothing. break; case BBJ_ALWAYS: if (bbFlags & BBF_KEEP_BBJ_ALWAYS) { printf(" -> " FMT_BB " (ALWAYS)", bbJumpDest->bbNum); } else { printf(" -> " FMT_BB " (always)", bbJumpDest->bbNum); } break; case BBJ_LEAVE: printf(" -> " FMT_BB " (leave)", bbJumpDest->bbNum); break; case BBJ_CALLFINALLY: printf(" -> " FMT_BB " (callf)", bbJumpDest->bbNum); break; case BBJ_COND: printf(" -> " FMT_BB " (cond)", bbJumpDest->bbNum); break; case BBJ_SWITCH: { printf(" ->"); const unsigned jumpCnt = bbJumpSwt->bbsCount; BasicBlock** const jumpTab = bbJumpSwt->bbsDstTab; for (unsigned i = 0; i < jumpCnt; i++) { printf("%c" FMT_BB, (i == 0) ? 
' ' : ',', jumpTab[i]->bbNum); const bool isDefault = bbJumpSwt->bbsHasDefault && (i == jumpCnt - 1); if (isDefault) { printf("[def]"); } const bool isDominant = bbJumpSwt->bbsHasDominantCase && (i == bbJumpSwt->bbsDominantCase); if (isDominant) { printf("[dom(" FMT_WT ")]", bbJumpSwt->bbsDominantFraction); } } printf(" (switch)"); } break; default: unreached(); break; } } void BasicBlock::dspBlockHeader(Compiler* compiler, bool showKind /*= true*/, bool showFlags /*= false*/, bool showPreds /*= true*/) { printf(FMT_BB " ", bbNum); dspBlockILRange(); if (showKind) { dspJumpKind(); } if (showPreds) { printf(", preds={"); if (compiler->fgCheapPredsValid) { dspCheapPreds(); } else { dspPreds(); } printf("} succs={"); dspSuccs(compiler); printf("}"); } if (showFlags) { const unsigned lowFlags = (unsigned)bbFlags; const unsigned highFlags = (unsigned)(bbFlags >> 32); printf(" flags=0x%08x.%08x: ", highFlags, lowFlags); dspFlags(); } printf("\n"); } const char* BasicBlock::dspToString(int blockNumPadding /* = 0 */) { static char buffers[3][64]; // static array of 3 to allow 3 concurrent calls in one printf() static int nextBufferIndex = 0; auto& buffer = buffers[nextBufferIndex]; nextBufferIndex = (nextBufferIndex + 1) % ArrLen(buffers); _snprintf_s(buffer, ArrLen(buffer), ArrLen(buffer), FMT_BB "%*s [%04u]", bbNum, blockNumPadding, "", bbID); return buffer; } #endif // DEBUG // Allocation function for MemoryPhiArg. void* BasicBlock::MemoryPhiArg::operator new(size_t sz, Compiler* comp) { return comp->getAllocator(CMK_MemoryPhiArg).allocate<char>(sz); } //------------------------------------------------------------------------ // CloneBlockState: Try to populate `to` block with a copy of `from` block's statements, replacing // uses of local `varNum` with IntCns `varVal`. // // Arguments: // compiler - Jit compiler instance // to - New/empty block to copy statements into // from - Block to copy statements from // varNum - lclVar uses with lclNum `varNum` will be replaced; can be ~0 to indicate no replacement. // varVal - If replacing uses of `varNum`, replace them with int constants with value `varVal`. // // Return Value: // Cloning may fail because this routine uses `gtCloneExpr` for cloning and it can't handle all // IR nodes. If cloning of any statement fails, `false` will be returned and block `to` may be // partially populated. If cloning of all statements succeeds, `true` will be returned and // block `to` will be fully populated. bool BasicBlock::CloneBlockState( Compiler* compiler, BasicBlock* to, const BasicBlock* from, unsigned varNum, int varVal) { assert(to->bbStmtList == nullptr); to->bbFlags = from->bbFlags; to->bbWeight = from->bbWeight; BlockSetOps::AssignAllowUninitRhs(compiler, to->bbReach, from->bbReach); to->copyEHRegion(from); to->bbCatchTyp = from->bbCatchTyp; to->bbRefs = from->bbRefs; to->bbStkTempsIn = from->bbStkTempsIn; to->bbStkTempsOut = from->bbStkTempsOut; to->bbStkDepth = from->bbStkDepth; to->bbCodeOffs = from->bbCodeOffs; to->bbCodeOffsEnd = from->bbCodeOffsEnd; VarSetOps::AssignAllowUninitRhs(compiler, to->bbScope, from->bbScope); to->bbNatLoopNum = from->bbNatLoopNum; #ifdef DEBUG to->bbTgtStkDepth = from->bbTgtStkDepth; #endif // DEBUG for (Statement* const fromStmt : from->Statements()) { GenTree* newExpr = compiler->gtCloneExpr(fromStmt->GetRootNode(), GTF_EMPTY, varNum, varVal); if (!newExpr) { // gtCloneExpr doesn't handle all opcodes, so may fail to clone a statement. 
// When that happens, it returns nullptr; abandon the rest of this block and // return `false` to the caller to indicate that cloning was unsuccessful. return false; } compiler->fgInsertStmtAtEnd(to, compiler->fgNewStmtFromTree(newExpr, fromStmt->GetDebugInfo())); } return true; } // LIR helpers void BasicBlock::MakeLIR(GenTree* firstNode, GenTree* lastNode) { assert(!IsLIR()); assert((firstNode == nullptr) == (lastNode == nullptr)); assert((firstNode == lastNode) || firstNode->Precedes(lastNode)); m_firstNode = firstNode; m_lastNode = lastNode; bbFlags |= BBF_IS_LIR; } bool BasicBlock::IsLIR() const { assert(isValid()); const bool isLIR = ((bbFlags & BBF_IS_LIR) != 0); return isLIR; } //------------------------------------------------------------------------ // firstStmt: Returns the first statement in the block // // Arguments: // None. // // Return Value: // The first statement in the block's bbStmtList. // Statement* BasicBlock::firstStmt() const { return bbStmtList; } //------------------------------------------------------------------------ // lastStmt: Returns the last statement in the block // // Arguments: // None. // // Return Value: // The last statement in the block's bbStmtList. // Statement* BasicBlock::lastStmt() const { if (bbStmtList == nullptr) { return nullptr; } Statement* result = bbStmtList->GetPrevStmt(); assert(result != nullptr && result->GetNextStmt() == nullptr); return result; } //------------------------------------------------------------------------ // BasicBlock::lastNode: Returns the last node in the block. // GenTree* BasicBlock::lastNode() const { return IsLIR() ? m_lastNode : lastStmt()->GetRootNode(); } //------------------------------------------------------------------------ // GetUniquePred: Returns the unique predecessor of a block, if one exists. // The predecessor lists must be accurate. // // Arguments: // None. // // Return Value: // The unique predecessor of a block, or nullptr if there is no unique predecessor. // // Notes: // If the first block has a predecessor (which it may have, if it is the target of // a backedge), we never want to consider it "unique" because the prolog is an // implicit predecessor. BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler) const { if ((bbPreds == nullptr) || (bbPreds->flNext != nullptr) || (this == compiler->fgFirstBB)) { return nullptr; } else { return bbPreds->getBlock(); } } //------------------------------------------------------------------------ // GetUniqueSucc: Returns the unique successor of a block, if one exists. // Only considers BBJ_ALWAYS and BBJ_NONE block types. // // Arguments: // None. // // Return Value: // The unique successor of a block, or nullptr if there is no unique successor. BasicBlock* BasicBlock::GetUniqueSucc() const { if (bbJumpKind == BBJ_ALWAYS) { return bbJumpDest; } else if (bbJumpKind == BBJ_NONE) { return bbNext; } else { return nullptr; } } // Static vars. BasicBlock::MemoryPhiArg* BasicBlock::EmptyMemoryPhiDef = (BasicBlock::MemoryPhiArg*)0x1; unsigned JitPtrKeyFuncs<BasicBlock>::GetHashCode(const BasicBlock* ptr) { #ifdef DEBUG unsigned hash = SsaStressHashHelper(); if (hash != 0) { return (hash ^ (ptr->bbNum << 16) ^ ptr->bbNum); } #endif return ptr->bbNum; } //------------------------------------------------------------------------ // isEmpty: check if block is empty or contains only ignorable statements // // Return Value: // True if block is empty, or contains only PHI assignments, // or contains zero or more PHI assignments followed by NOPs. 
// bool BasicBlock::isEmpty() const { if (!IsLIR()) { for (Statement* const stmt : NonPhiStatements()) { if (!stmt->GetRootNode()->OperIs(GT_NOP)) { return false; } } } else { for (GenTree* node : LIR::AsRange(this)) { if (node->OperGet() != GT_IL_OFFSET) { return false; } } } return true; } //------------------------------------------------------------------------ // isValid: Checks that the basic block doesn't mix statements and LIR lists. // // Return Value: // True if it a valid basic block. // bool BasicBlock::isValid() const { const bool isLIR = ((bbFlags & BBF_IS_LIR) != 0); if (isLIR) { // Should not have statements in LIR. return (bbStmtList == nullptr); } else { // Should not have tree list before LIR. return (GetFirstLIRNode() == nullptr); } } Statement* BasicBlock::FirstNonPhiDef() const { Statement* stmt = firstStmt(); if (stmt == nullptr) { return nullptr; } GenTree* tree = stmt->GetRootNode(); while ((tree->OperGet() == GT_ASG && tree->AsOp()->gtOp2->OperGet() == GT_PHI) || (tree->OperGet() == GT_STORE_LCL_VAR && tree->AsOp()->gtOp1->OperGet() == GT_PHI)) { stmt = stmt->GetNextStmt(); if (stmt == nullptr) { return nullptr; } tree = stmt->GetRootNode(); } return stmt; } Statement* BasicBlock::FirstNonPhiDefOrCatchArgAsg() const { Statement* stmt = FirstNonPhiDef(); if (stmt == nullptr) { return nullptr; } GenTree* tree = stmt->GetRootNode(); if ((tree->OperGet() == GT_ASG && tree->AsOp()->gtOp2->OperGet() == GT_CATCH_ARG) || (tree->OperGet() == GT_STORE_LCL_VAR && tree->AsOp()->gtOp1->OperGet() == GT_CATCH_ARG)) { stmt = stmt->GetNextStmt(); } return stmt; } /***************************************************************************** * * Can a BasicBlock be inserted after this without altering the flowgraph */ bool BasicBlock::bbFallsThrough() const { switch (bbJumpKind) { case BBJ_THROW: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_EHCATCHRET: case BBJ_RETURN: case BBJ_ALWAYS: case BBJ_LEAVE: case BBJ_SWITCH: return false; case BBJ_NONE: case BBJ_COND: return true; case BBJ_CALLFINALLY: return ((bbFlags & BBF_RETLESS_CALL) == 0); default: assert(!"Unknown bbJumpKind in bbFallsThrough()"); return true; } } //------------------------------------------------------------------------ // NumSucc: Returns the count of block successors. See the declaration comment for details. // // Arguments: // None. // // Return Value: // Count of block successors. // unsigned BasicBlock::NumSucc() const { switch (bbJumpKind) { case BBJ_THROW: case BBJ_RETURN: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: return 0; case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_LEAVE: case BBJ_NONE: return 1; case BBJ_COND: if (bbJumpDest == bbNext) { return 1; } else { return 2; } case BBJ_SWITCH: return bbJumpSwt->bbsCount; default: unreached(); } } //------------------------------------------------------------------------ // GetSucc: Returns the requested block successor. See the declaration comment for details. // // Arguments: // i - index of successor to return. 0 <= i <= NumSucc(). // // Return Value: // Requested successor block // BasicBlock* BasicBlock::GetSucc(unsigned i) const { assert(i < NumSucc()); // Index bounds check. 
switch (bbJumpKind) { case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_LEAVE: return bbJumpDest; case BBJ_NONE: return bbNext; case BBJ_COND: if (i == 0) { return bbNext; } else { assert(i == 1); return bbJumpDest; } case BBJ_SWITCH: return bbJumpSwt->bbsDstTab[i]; default: unreached(); } } //------------------------------------------------------------------------ // NumSucc: Returns the count of block successors. See the declaration comment for details. // // Arguments: // comp - Compiler instance // // Return Value: // Count of block successors. // unsigned BasicBlock::NumSucc(Compiler* comp) { assert(comp != nullptr); switch (bbJumpKind) { case BBJ_THROW: case BBJ_RETURN: return 0; case BBJ_EHFINALLYRET: { // The first block of the handler is labelled with the catch type. BasicBlock* hndBeg = comp->fgFirstBlockOfHandler(this); if (hndBeg->bbCatchTyp == BBCT_FINALLY) { return comp->fgNSuccsOfFinallyRet(this); } else { assert(hndBeg->bbCatchTyp == BBCT_FAULT); // We can only BBJ_EHFINALLYRET from FINALLY and FAULT. // A FAULT block has no successors. return 0; } } case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: case BBJ_NONE: return 1; case BBJ_COND: if (bbJumpDest == bbNext) { return 1; } else { return 2; } case BBJ_SWITCH: { Compiler::SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(this); return sd.numDistinctSuccs; } default: unreached(); } } //------------------------------------------------------------------------ // GetSucc: Returns the requested block successor. See the declaration comment for details. // // Arguments: // i - index of successor to return. 0 <= i <= NumSucc(comp). // comp - Compiler instance // // Return Value: // Requested successor block // BasicBlock* BasicBlock::GetSucc(unsigned i, Compiler* comp) { assert(comp != nullptr); assert(i < NumSucc(comp)); // Index bounds check. switch (bbJumpKind) { case BBJ_EHFILTERRET: { // Handler is the (sole) normal successor of the filter. assert(comp->fgFirstBlockOfHandler(this) == bbJumpDest); return bbJumpDest; } case BBJ_EHFINALLYRET: // Note: the following call is expensive. return comp->fgSuccOfFinallyRet(this, i); case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_LEAVE: return bbJumpDest; case BBJ_NONE: return bbNext; case BBJ_COND: if (i == 0) { return bbNext; } else { assert(i == 1); return bbJumpDest; } case BBJ_SWITCH: { Compiler::SwitchUniqueSuccSet sd = comp->GetDescriptorForSwitch(this); assert(i < sd.numDistinctSuccs); // Range check. 
return sd.nonDuplicates[i]; } default: unreached(); } } void BasicBlock::InitVarSets(Compiler* comp) { VarSetOps::AssignNoCopy(comp, bbVarUse, VarSetOps::MakeEmpty(comp)); VarSetOps::AssignNoCopy(comp, bbVarDef, VarSetOps::MakeEmpty(comp)); VarSetOps::AssignNoCopy(comp, bbLiveIn, VarSetOps::MakeEmpty(comp)); VarSetOps::AssignNoCopy(comp, bbLiveOut, VarSetOps::MakeEmpty(comp)); VarSetOps::AssignNoCopy(comp, bbScope, VarSetOps::MakeEmpty(comp)); bbMemoryUse = emptyMemoryKindSet; bbMemoryDef = emptyMemoryKindSet; bbMemoryLiveIn = emptyMemoryKindSet; bbMemoryLiveOut = emptyMemoryKindSet; } // Returns true if the basic block ends with GT_JMP bool BasicBlock::endsWithJmpMethod(Compiler* comp) const { if (comp->compJmpOpUsed && (bbJumpKind == BBJ_RETURN) && (bbFlags & BBF_HAS_JMP)) { GenTree* lastNode = this->lastNode(); assert(lastNode != nullptr); return lastNode->OperGet() == GT_JMP; } return false; } // Returns true if the basic block ends with either // i) GT_JMP or // ii) tail call (implicit or explicit) // // Params: // comp - Compiler instance // fastTailCallsOnly - Only consider fast tail calls excluding tail calls via helper. // bool BasicBlock::endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly /*=false*/) const { GenTree* tailCall = nullptr; bool tailCallsConvertibleToLoopOnly = false; return endsWithJmpMethod(comp) || endsWithTailCall(comp, fastTailCallsOnly, tailCallsConvertibleToLoopOnly, &tailCall); } //------------------------------------------------------------------------------ // endsWithTailCall : Check if the block ends with a tail call. // // Arguments: // comp - compiler instance // fastTailCallsOnly - check for fast tail calls only // tailCallsConvertibleToLoopOnly - check for tail calls convertible to loop only // tailCall - a pointer to a tree that will be set to the call tree if the block // ends with a tail call and will be set to nullptr otherwise. // // Return Value: // true if the block ends with a tail call; false otherwise. // // Notes: // At most one of fastTailCallsOnly and tailCallsConvertibleToLoopOnly flags can be true. // bool BasicBlock::endsWithTailCall(Compiler* comp, bool fastTailCallsOnly, bool tailCallsConvertibleToLoopOnly, GenTree** tailCall) const { assert(!fastTailCallsOnly || !tailCallsConvertibleToLoopOnly); *tailCall = nullptr; bool result = false; // Is this a tail call? // The reason for keeping this under RyuJIT is so as not to impact existing Jit32 x86 and arm // targets. if (comp->compTailCallUsed) { if (fastTailCallsOnly || tailCallsConvertibleToLoopOnly) { // Only fast tail calls or only tail calls convertible to loops result = (bbFlags & BBF_HAS_JMP) && (bbJumpKind == BBJ_RETURN); } else { // Fast tail calls, tail calls convertible to loops, and tails calls dispatched via helper result = (bbJumpKind == BBJ_THROW) || ((bbFlags & BBF_HAS_JMP) && (bbJumpKind == BBJ_RETURN)); } if (result) { GenTree* lastNode = this->lastNode(); if (lastNode->OperGet() == GT_CALL) { GenTreeCall* call = lastNode->AsCall(); if (tailCallsConvertibleToLoopOnly) { result = call->IsTailCallConvertibleToLoop(); } else if (fastTailCallsOnly) { result = call->IsFastTailCall(); } else { result = call->IsTailCall(); } if (result) { *tailCall = call; } } else { result = false; } } } return result; } //------------------------------------------------------------------------------ // endsWithTailCallConvertibleToLoop : Check if the block ends with a tail call convertible to loop. 
// // Arguments: // comp - compiler instance // tailCall - a pointer to a tree that will be set to the call tree if the block // ends with a tail call convertible to loop and will be set to nullptr otherwise. // // Return Value: // true if the block ends with a tail call convertible to loop. // bool BasicBlock::endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall) const { bool fastTailCallsOnly = false; bool tailCallsConvertibleToLoopOnly = true; return endsWithTailCall(comp, fastTailCallsOnly, tailCallsConvertibleToLoopOnly, tailCall); } /***************************************************************************** * * Allocate a basic block but don't append it to the current BB list. */ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind) { BasicBlock* block; /* Allocate the block descriptor and zero it out */ assert(fgSafeBasicBlockCreation); block = new (this, CMK_BasicBlock) BasicBlock; #if MEASURE_BLOCK_SIZE BasicBlock::s_Count += 1; BasicBlock::s_Size += sizeof(*block); #endif #ifdef DEBUG // fgLookupBB() is invalid until fgInitBBLookup() is called again. fgBBs = (BasicBlock**)0xCDCD; #endif // TODO-Throughput: The following memset is pretty expensive - do something else? // Note that some fields have to be initialized to 0 (like bbFPStateX87) memset(block, 0, sizeof(*block)); // scopeInfo needs to be able to differentiate between blocks which // correspond to some instrs (and so may have some LocalVarInfo // boundaries), or have been inserted by the JIT block->bbCodeOffs = BAD_IL_OFFSET; block->bbCodeOffsEnd = BAD_IL_OFFSET; #ifdef DEBUG block->bbID = compBasicBlockID++; #endif /* Give the block a number, set the ancestor count and weight */ ++fgBBcount; ++fgBBNumMax; if (compIsForInlining()) { block->bbNum = ++impInlineInfo->InlinerCompiler->fgBBNumMax; } else { block->bbNum = fgBBNumMax; } if (compRationalIRForm) { block->bbFlags |= BBF_IS_LIR; } block->bbRefs = 1; block->bbWeight = BB_UNITY_WEIGHT; block->bbStkTempsIn = NO_BASE_TMP; block->bbStkTempsOut = NO_BASE_TMP; block->bbEntryState = nullptr; /* Record the jump kind in the block */ block->bbJumpKind = jumpKind; if (jumpKind == BBJ_THROW) { block->bbSetRunRarely(); } #ifdef DEBUG if (verbose) { printf("New Basic Block %s created.\n", block->dspToString()); } #endif // We will give all the blocks var sets after the number of tracked variables // is determined and frozen. After that, if we dynamically create a basic block, // we will initialize its var sets. 
if (fgBBVarSetsInited) { VarSetOps::AssignNoCopy(this, block->bbVarUse, VarSetOps::MakeEmpty(this)); VarSetOps::AssignNoCopy(this, block->bbVarDef, VarSetOps::MakeEmpty(this)); VarSetOps::AssignNoCopy(this, block->bbLiveIn, VarSetOps::MakeEmpty(this)); VarSetOps::AssignNoCopy(this, block->bbLiveOut, VarSetOps::MakeEmpty(this)); VarSetOps::AssignNoCopy(this, block->bbScope, VarSetOps::MakeEmpty(this)); } else { VarSetOps::AssignNoCopy(this, block->bbVarUse, VarSetOps::UninitVal()); VarSetOps::AssignNoCopy(this, block->bbVarDef, VarSetOps::UninitVal()); VarSetOps::AssignNoCopy(this, block->bbLiveIn, VarSetOps::UninitVal()); VarSetOps::AssignNoCopy(this, block->bbLiveOut, VarSetOps::UninitVal()); VarSetOps::AssignNoCopy(this, block->bbScope, VarSetOps::UninitVal()); } block->bbMemoryUse = emptyMemoryKindSet; block->bbMemoryDef = emptyMemoryKindSet; block->bbMemoryLiveIn = emptyMemoryKindSet; block->bbMemoryLiveOut = emptyMemoryKindSet; for (MemoryKind memoryKind : allMemoryKinds()) { block->bbMemorySsaPhiFunc[memoryKind] = nullptr; block->bbMemorySsaNumIn[memoryKind] = 0; block->bbMemorySsaNumOut[memoryKind] = 0; } // Make sure we reserve a NOT_IN_LOOP value that isn't a legal table index. static_assert_no_msg(BasicBlock::MAX_LOOP_NUM < BasicBlock::NOT_IN_LOOP); block->bbNatLoopNum = BasicBlock::NOT_IN_LOOP; return block; } //------------------------------------------------------------------------ // isBBCallAlwaysPair: Determine if this is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair // // Return Value: // True iff "this" is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair // -- a block corresponding to an exit from the try of a try/finally. // // Notes: // In the flow graph, this becomes a block that calls the finally, and a second, immediately // following empty block (in the bbNext chain) to which the finally will return, and which // branches unconditionally to the next block to be executed outside the try/finally. // Note that code is often generated differently than this description. For example, on ARM, // the target of the BBJ_ALWAYS is loaded in LR (the return register), and a direct jump is // made to the 'finally'. The effect is that the 'finally' returns directly to the target of // the BBJ_ALWAYS. A "retless" BBJ_CALLFINALLY is one that has no corresponding BBJ_ALWAYS. // This can happen if the finally is known to not return (e.g., it contains a 'throw'). In // that case, the BBJ_CALLFINALLY flags has BBF_RETLESS_CALL set. Note that ARM never has // "retless" BBJ_CALLFINALLY blocks due to a requirement to use the BBJ_ALWAYS for // generating code. // bool BasicBlock::isBBCallAlwaysPair() const { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (this->bbJumpKind == BBJ_CALLFINALLY) #else if ((this->bbJumpKind == BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) #endif { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // On ARM, there are no retless BBJ_CALLFINALLY. assert(!(this->bbFlags & BBF_RETLESS_CALL)); #endif // Some asserts that the next block is a BBJ_ALWAYS of the proper form. 
assert(this->bbNext != nullptr); assert(this->bbNext->bbJumpKind == BBJ_ALWAYS); assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS); assert(this->bbNext->isEmpty()); return true; } else { return false; } } //------------------------------------------------------------------------ // isBBCallAlwaysPairTail: Determine if this is the last block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair // // Return Value: // True iff "this" is the last block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair // -- a block corresponding to an exit from the try of a try/finally. // // Notes: // See notes on isBBCallAlwaysPair(), above. // bool BasicBlock::isBBCallAlwaysPairTail() const { return (bbPrev != nullptr) && bbPrev->isBBCallAlwaysPair(); } //------------------------------------------------------------------------ // hasEHBoundaryIn: Determine if this block begins at an EH boundary. // // Return Value: // True iff the block is the target of an EH edge; false otherwise. // // Notes: // For the purposes of this method (and its callers), an EH edge is one on // which the EH flow model requires that all lclVars must be reloaded from // the stack before use, since control flow may transfer to this block through // control flow that is not reflected in the flowgraph. // Note that having a predecessor in a different EH region doesn't require // that lclVars must be reloaded from the stack. That's only required when // this block might be entered via flow that is not represented by an edge // in the flowgraph. // bool BasicBlock::hasEHBoundaryIn() const { bool returnVal = (bbCatchTyp != BBCT_NONE); if (!returnVal) { #if FEATURE_EH_FUNCLETS assert((bbFlags & BBF_FUNCLET_BEG) == 0); #endif // FEATURE_EH_FUNCLETS } return returnVal; } //------------------------------------------------------------------------ // hasEHBoundaryOut: Determine if this block ends in an EH boundary. // // Return Value: // True iff the block ends in an exception boundary that requires that no lclVars // are live in registers; false otherwise. // // Notes: // We may have a successor in a different EH region, but it is OK to have lclVars // live in registers if any successor is a normal flow edge. That's because the // EH write-thru semantics ensure that we always have an up-to-date value on the stack. // bool BasicBlock::hasEHBoundaryOut() const { bool returnVal = false; if (bbJumpKind == BBJ_EHFILTERRET) { returnVal = true; } if (bbJumpKind == BBJ_EHFINALLYRET) { returnVal = true; } #if FEATURE_EH_FUNCLETS if (bbJumpKind == BBJ_EHCATCHRET) { returnVal = true; } #endif // FEATURE_EH_FUNCLETS return returnVal; } //------------------------------------------------------------------------ // BBswtDesc copy ctor: copy a switch descriptor // // Arguments: // comp - compiler instance // other - existing switch descriptor to copy // BBswtDesc::BBswtDesc(Compiler* comp, const BBswtDesc* other) : bbsDstTab(nullptr) , bbsCount(other->bbsCount) , bbsDominantCase(other->bbsDominantCase) , bbsDominantFraction(other->bbsDominantFraction) , bbsHasDefault(other->bbsHasDefault) , bbsHasDominantCase(other->bbsHasDominantCase) { // Allocate and fill in a new dst tab // bbsDstTab = new (comp, CMK_BasicBlock) BasicBlock*[bbsCount]; for (unsigned i = 0; i < bbsCount; i++) { bbsDstTab[i] = other->bbsDstTab[i]; } } //------------------------------------------------------------------------ // unmarkLoopAlign: Unmarks the LOOP_ALIGN flag from the block and reduce the // loop alignment count. 
// // Arguments: // compiler - Compiler instance // reason - Reason to print in JITDUMP // void BasicBlock::unmarkLoopAlign(Compiler* compiler DEBUG_ARG(const char* reason)) { // Make sure we unmark and count just once. if (isLoopAlign()) { compiler->loopAlignCandidates--; bbFlags &= ~BBF_LOOP_ALIGN; JITDUMP("Unmarking LOOP_ALIGN from " FMT_BB ". Reason= %s.\n", bbNum, reason); } }
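// ----------------------------------------------------------------------------
// Editor's note: the helper below is an illustrative sketch only; it is not part
// of the original block.cpp. It demonstrates the NumSucc/GetSucc contract
// documented above: the two overloads must be used as a matching pair, so a
// caller that passes a Compiler* to NumSucc must also pass it to GetSucc. With
// the Compiler*, switch blocks yield only their unique successors and
// BBJ_EHFINALLYRET/BBJ_EHFILTERRET blocks yield their EH-aware successors.
// The name countConditionalSuccs is invented for this example.
//
static unsigned countConditionalSuccs(Compiler* comp, BasicBlock* block)
{
    unsigned count = 0;
    for (unsigned i = 0; i < block->NumSucc(comp); i++)
    {
        // Use the GetSucc overload that matches NumSucc(comp).
        BasicBlock* const succ = block->GetSucc(i, comp);
        if (succ->KindIs(BBJ_COND))
        {
            count++;
        }
    }
    return count;
}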
1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
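A minimal illustrative sketch of the placement policy described above (not code from the PR; the names PatchpointPlacement, choosePatchpointPlacement, and backEdgeThreshold are invented, and the actual JIT applies its own heuristics and configuration when it decides where to insert patchpoints):

// Hypothetical sketch only: names are invented for illustration.
enum class PatchpointPlacement { AtTargets, AtSources };

// Adaptive policy: with few backedges, prefer placing patchpoints at backedge
// sources; otherwise (or when sources are unusable) fall back to targets.
static PatchpointPlacement choosePatchpointPlacement(unsigned numBackEdges, unsigned backEdgeThreshold)
{
    return (numBackEdges <= backEdgeThreshold) ? PatchpointPlacement::AtSources : PatchpointPlacement::AtTargets;
}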
./src/coreclr/jit/block.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX BasicBlock XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _BLOCK_H_ #define _BLOCK_H_ /*****************************************************************************/ #include "vartype.h" // For "var_types.h" #include "_typeinfo.h" /*****************************************************************************/ // Defines VARSET_TP #include "varset.h" #include "blockset.h" #include "jitstd.h" #include "bitvec.h" #include "jithashtable.h" /*****************************************************************************/ typedef BitVec EXPSET_TP; typedef BitVec_ValArg_T EXPSET_VALARG_TP; typedef BitVec_ValRet_T EXPSET_VALRET_TP; #define EXPSET_SZ 64 typedef BitVec ASSERT_TP; typedef BitVec_ValArg_T ASSERT_VALARG_TP; typedef BitVec_ValRet_T ASSERT_VALRET_TP; // We use the following format when printing the BasicBlock number: bbNum // This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int) #define FMT_BB "BB%02u" // Use this format for loop table indices. #define FMT_LP "L%02u" // And this format for profile weights #define FMT_WT "%.7g" /***************************************************************************** * * Each basic block ends with a jump which is described as a value * of the following enumeration. */ // clang-format off enum BBjumpKinds : BYTE { BBJ_EHFINALLYRET,// block ends with 'endfinally' (for finally or fault) BBJ_EHFILTERRET, // block ends with 'endfilter' BBJ_EHCATCHRET, // block ends with a leave out of a catch (only #if defined(FEATURE_EH_FUNCLETS)) BBJ_THROW, // block ends with 'throw' BBJ_RETURN, // block ends with 'ret' BBJ_NONE, // block flows into the next one (no jump) BBJ_ALWAYS, // block always jumps to the target BBJ_LEAVE, // block always jumps to the target, maybe out of guarded region. Only used until importing. BBJ_CALLFINALLY, // block always calls the target finally BBJ_COND, // block conditionally jumps to the target BBJ_SWITCH, // block ends with a switch statement BBJ_COUNT }; // clang-format on struct GenTree; struct Statement; struct BasicBlock; class Compiler; class typeInfo; struct BasicBlockList; struct flowList; struct EHblkDsc; struct BBswtDesc; struct StackEntry { GenTree* val; typeInfo seTypeInfo; }; /*****************************************************************************/ enum ThisInitState { TIS_Bottom, // We don't know anything about the 'this' pointer. TIS_Uninit, // The 'this' pointer for this constructor is known to be uninitialized. TIS_Init, // The 'this' pointer for this constructor is known to be initialized. TIS_Top, // This results from merging the state of two blocks one with TIS_Unint and the other with TIS_Init. // We use this in fault blocks to prevent us from accessing the 'this' pointer, but otherwise // allowing the fault block to generate code. }; struct EntryState { ThisInitState thisInitialized; // used to track whether the this ptr is initialized. 
unsigned esStackDepth; // size of esStack StackEntry* esStack; // ptr to stack }; // Enumeration of the kinds of memory whose state changes the compiler tracks enum MemoryKind { ByrefExposed = 0, // Includes anything byrefs can read/write (everything in GcHeap, address-taken locals, // unmanaged heap, callers' locals, etc.) GcHeap, // Includes actual GC heap, and also static fields MemoryKindCount, // Number of MemoryKinds }; #ifdef DEBUG const char* const memoryKindNames[] = {"ByrefExposed", "GcHeap"}; #endif // DEBUG // Bitmask describing a set of memory kinds (usable in bitfields) typedef unsigned int MemoryKindSet; // Bitmask for a MemoryKindSet containing just the specified MemoryKind inline MemoryKindSet memoryKindSet(MemoryKind memoryKind) { return (1U << memoryKind); } // Bitmask for a MemoryKindSet containing the specified MemoryKinds template <typename... MemoryKinds> inline MemoryKindSet memoryKindSet(MemoryKind memoryKind, MemoryKinds... memoryKinds) { return memoryKindSet(memoryKind) | memoryKindSet(memoryKinds...); } // Bitmask containing all the MemoryKinds const MemoryKindSet fullMemoryKindSet = (1 << MemoryKindCount) - 1; // Bitmask containing no MemoryKinds const MemoryKindSet emptyMemoryKindSet = 0; // Standard iterator class for iterating through MemoryKinds class MemoryKindIterator { int value; public: explicit inline MemoryKindIterator(int val) : value(val) { } inline MemoryKindIterator& operator++() { ++value; return *this; } inline MemoryKindIterator operator++(int) { return MemoryKindIterator(value++); } inline MemoryKind operator*() { return static_cast<MemoryKind>(value); } friend bool operator==(const MemoryKindIterator& left, const MemoryKindIterator& right) { return left.value == right.value; } friend bool operator!=(const MemoryKindIterator& left, const MemoryKindIterator& right) { return left.value != right.value; } }; // Empty struct that allows enumerating memory kinds via `for(MemoryKind kind : allMemoryKinds())` struct allMemoryKinds { inline allMemoryKinds() { } inline MemoryKindIterator begin() { return MemoryKindIterator(0); } inline MemoryKindIterator end() { return MemoryKindIterator(MemoryKindCount); } }; // This encapsulates the "exception handling" successors of a block. That is, // if a basic block BB1 occurs in a try block, we consider the first basic block // BB2 of the corresponding handler to be an "EH successor" of BB1. Because we // make the conservative assumption that control flow can jump from a try block // to its handler at any time, the immediate (regular control flow) // predecessor(s) of the the first block of a try block are also considered to // have the first block of the handler as an EH successor. This makes variables that // are "live-in" to the handler become "live-out" for these try-predecessor block, // so that they become live-in to the try -- which we require. // // This class maintains the minimum amount of state necessary to implement // successor iteration. The basic block whose successors are enumerated and // the compiler need to be provided by Advance/Current's callers. In addition // to iterators, this allows the use of other approaches that are more space // efficient. class EHSuccessorIterPosition { // The number of "regular" (i.e., non-exceptional) successors that remain to // be considered. If BB1 has successor BB2, and BB2 is the first block of a // try block, then we consider the catch block of BB2's try to be an EH // successor of BB1. 
This captures the iteration over the successors of BB1 // for this purpose. (In reverse order; we're done when this field is 0). unsigned m_remainingRegSuccs; // The current "regular" successor of "m_block" that we're considering. BasicBlock* m_curRegSucc; // The current try block. If non-null, then the current successor "m_curRegSucc" // is the first block of the handler of this block. While this try block has // enclosing try's that also start with "m_curRegSucc", the corresponding handlers will be // further EH successors. EHblkDsc* m_curTry; // Requires that "m_curTry" is NULL. Determines whether there is, as // discussed just above, a regular successor that's the first block of a // try; if so, sets "m_curTry" to that try block. (As noted above, selecting // the try containing the current regular successor as the "current try" may cause // multiple first-blocks of catches to be yielded as EH successors: trys enclosing // the current try are also included if they also start with the current EH successor.) void FindNextRegSuccTry(Compiler* comp, BasicBlock* block); public: // Constructs a position that "points" to the first EH successor of `block`. EHSuccessorIterPosition(Compiler* comp, BasicBlock* block); // Constructs a position that "points" past the last EH successor of `block` ("end" position). EHSuccessorIterPosition() : m_remainingRegSuccs(0), m_curTry(nullptr) { } // Go on to the next EH successor. void Advance(Compiler* comp, BasicBlock* block); // Returns the current EH successor. // Requires that "*this" is not equal to the "end" position. BasicBlock* Current(Compiler* comp, BasicBlock* block); // Returns "true" iff "*this" is equal to "ehsi". bool operator==(const EHSuccessorIterPosition& ehsi) { return m_curTry == ehsi.m_curTry && m_remainingRegSuccs == ehsi.m_remainingRegSuccs; } bool operator!=(const EHSuccessorIterPosition& ehsi) { return !((*this) == ehsi); } }; // Yields both normal and EH successors (in that order) in one iteration. // // This class maintains the minimum amount of state necessary to implement // successor iteration. The basic block whose successors are enumerated and // the compiler need to be provided by Advance/Current's callers. In addition // to iterators, this allows the use of other approaches that are more space // efficient. class AllSuccessorIterPosition { // Normal successor position unsigned m_numNormSuccs; unsigned m_remainingNormSucc; // EH successor position EHSuccessorIterPosition m_ehIter; // True iff m_blk is a BBJ_CALLFINALLY block, and the current try block of m_ehIter, // the first block of whose handler would be next yielded, is the jump target of m_blk. inline bool CurTryIsBlkCallFinallyTarget(Compiler* comp, BasicBlock* block); public: // Constructs a position that "points" to the first successor of `block`. inline AllSuccessorIterPosition(Compiler* comp, BasicBlock* block); // Constructs a position that "points" past the last successor of `block` ("end" position). AllSuccessorIterPosition() : m_remainingNormSucc(0), m_ehIter() { } // Go on to the next successor. inline void Advance(Compiler* comp, BasicBlock* block); // Returns the current successor. // Requires that "*this" is not equal to the "end" position. inline BasicBlock* Current(Compiler* comp, BasicBlock* block); bool IsCurrentEH() { return m_remainingNormSucc == 0; } bool HasCurrent() { return *this != AllSuccessorIterPosition(); } // Returns "true" iff "*this" is equal to "asi". 
bool operator==(const AllSuccessorIterPosition& asi) { return (m_remainingNormSucc == asi.m_remainingNormSucc) && (m_ehIter == asi.m_ehIter); } bool operator!=(const AllSuccessorIterPosition& asi) { return !((*this) == asi); } }; // PredEdgeList: adapter class for forward iteration of the predecessor edge linked list using range-based `for`, // normally used via BasicBlock::PredEdges(), e.g.: // for (flowList* const edge : block->PredEdges()) ... // class PredEdgeList { flowList* m_begin; // Forward iterator for the predecessor edges linked list. // The caller can't make changes to the preds list when using this. // class iterator { flowList* m_pred; #ifdef DEBUG // Try to guard against the user of the iterator from making changes to the IR that would invalidate // the iterator: cache the edge we think should be next, then check it when we actually do the `++` // operation. This is a bit conservative, but attempts to protect against callers assuming too much about // this iterator implementation. flowList* m_next; #endif public: iterator(flowList* pred); flowList* operator*() const { return m_pred; } iterator& operator++(); bool operator!=(const iterator& i) const { return m_pred != i.m_pred; } }; public: PredEdgeList(flowList* pred) : m_begin(pred) { } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(nullptr); } }; // PredBlockList: adapter class for forward iteration of the predecessor edge linked list yielding // predecessor blocks, using range-based `for`, normally used via BasicBlock::PredBlocks(), e.g.: // for (BasicBlock* const predBlock : block->PredBlocks()) ... // class PredBlockList { flowList* m_begin; // Forward iterator for the predecessor edges linked list, yielding the predecessor block, not the edge. // The caller can't make changes to the preds list when using this. // class iterator { flowList* m_pred; #ifdef DEBUG // Try to guard against the user of the iterator from making changes to the IR that would invalidate // the iterator: cache the edge we think should be next, then check it when we actually do the `++` // operation. This is a bit conservative, but attempts to protect against callers assuming too much about // this iterator implementation. flowList* m_next; #endif public: iterator(flowList* pred); BasicBlock* operator*() const; iterator& operator++(); bool operator!=(const iterator& i) const { return m_pred != i.m_pred; } }; public: PredBlockList(flowList* pred) : m_begin(pred) { } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(nullptr); } }; // BBArrayIterator: forward iterator for an array of BasicBlock*, such as the BBswtDesc->bbsDstTab. // It is an error (with assert) to yield a nullptr BasicBlock* in this array. // `m_bbEntry` can be nullptr, but it only makes sense if both the begin and end of an iteration range are nullptr // (meaning, no actual iteration will happen). 
// class BBArrayIterator { BasicBlock* const* m_bbEntry; public: BBArrayIterator(BasicBlock* const* bbEntry) : m_bbEntry(bbEntry) { } BasicBlock* operator*() const { assert(m_bbEntry != nullptr); BasicBlock* bTarget = *m_bbEntry; assert(bTarget != nullptr); return bTarget; } BBArrayIterator& operator++() { assert(m_bbEntry != nullptr); ++m_bbEntry; return *this; } bool operator!=(const BBArrayIterator& i) const { return m_bbEntry != i.m_bbEntry; } }; // BBSwitchTargetList: adapter class for forward iteration of switch targets, using range-based `for`, // normally used via BasicBlock::SwitchTargets(), e.g.: // for (BasicBlock* const target : block->SwitchTargets()) ... // class BBSwitchTargetList { BBswtDesc* m_bbsDesc; public: BBSwitchTargetList(BBswtDesc* bbsDesc); BBArrayIterator begin() const; BBArrayIterator end() const; }; //------------------------------------------------------------------------ // BasicBlockFlags: a bitmask of flags for BasicBlock // // clang-format off enum BasicBlockFlags : unsigned __int64 { #define MAKE_BBFLAG(bit) (1ULL << (bit)) BBF_EMPTY = 0, BBF_VISITED = MAKE_BBFLAG( 0), // BB visited during optimizations BBF_MARKED = MAKE_BBFLAG( 1), // BB marked during optimizations BBF_CHANGED = MAKE_BBFLAG( 2), // input/output of this block has changed BBF_REMOVED = MAKE_BBFLAG( 3), // BB has been removed from bb-list BBF_DONT_REMOVE = MAKE_BBFLAG( 4), // BB should not be removed during flow graph optimizations BBF_IMPORTED = MAKE_BBFLAG( 5), // BB byte-code has been imported BBF_INTERNAL = MAKE_BBFLAG( 6), // BB has been added by the compiler BBF_FAILED_VERIFICATION = MAKE_BBFLAG( 7), // BB has verification exception BBF_TRY_BEG = MAKE_BBFLAG( 8), // BB starts a 'try' block BBF_FUNCLET_BEG = MAKE_BBFLAG( 9), // BB is the beginning of a funclet BBF_HAS_NULLCHECK = MAKE_BBFLAG(10), // BB contains a null check BBF_HAS_SUPPRESSGC_CALL = MAKE_BBFLAG(11), // BB contains a call to a method with SuppressGCTransitionAttribute BBF_RUN_RARELY = MAKE_BBFLAG(12), // BB is rarely run (catch clauses, blocks with throws etc) BBF_LOOP_HEAD = MAKE_BBFLAG(13), // BB is the head of a loop BBF_LOOP_CALL0 = MAKE_BBFLAG(14), // BB starts a loop that sometimes won't call BBF_LOOP_CALL1 = MAKE_BBFLAG(15), // BB starts a loop that will always call BBF_HAS_LABEL = MAKE_BBFLAG(16), // BB needs a label BBF_LOOP_ALIGN = MAKE_BBFLAG(17), // Block is lexically the first block in a loop we intend to align. BBF_HAS_JMP = MAKE_BBFLAG(18), // BB executes a JMP instruction (instead of return) BBF_GC_SAFE_POINT = MAKE_BBFLAG(19), // BB has a GC safe point (a call). More abstractly, BB does not require a // (further) poll -- this may be because this BB has a call, or, in some // cases, because the BB occurs in a loop, and we've determined that all // paths in the loop body leading to BB include a call. BBF_HAS_IDX_LEN = MAKE_BBFLAG(20), // BB contains simple index or length expressions on an array local var. BBF_HAS_NEWARRAY = MAKE_BBFLAG(21), // BB contains 'new' of an array BBF_HAS_NEWOBJ = MAKE_BBFLAG(22), // BB contains 'new' of an object type. #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BBF_FINALLY_TARGET = MAKE_BBFLAG(23), // BB is the target of a finally return: where a finally will return during // non-exceptional flow. 
Because the ARM calling sequence for calling a // finally explicitly sets the return address to the finally target and jumps // to the finally, instead of using a call instruction, ARM needs this to // generate correct code at the finally target, to allow for proper stack // unwind from within a non-exceptional call to a finally. #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BBF_BACKWARD_JUMP = MAKE_BBFLAG(24), // BB is surrounded by a backward jump/switch arc BBF_RETLESS_CALL = MAKE_BBFLAG(25), // BBJ_CALLFINALLY that will never return (and therefore, won't need a paired // BBJ_ALWAYS); see isBBCallAlwaysPair(). BBF_LOOP_PREHEADER = MAKE_BBFLAG(26), // BB is a loop preheader block BBF_COLD = MAKE_BBFLAG(27), // BB is cold BBF_PROF_WEIGHT = MAKE_BBFLAG(28), // BB weight is computed from profile data BBF_IS_LIR = MAKE_BBFLAG(29), // Set if the basic block contains LIR (as opposed to HIR) BBF_KEEP_BBJ_ALWAYS = MAKE_BBFLAG(30), // A special BBJ_ALWAYS block, used by EH code generation. Keep the jump kind // as BBJ_ALWAYS. Used for the paired BBJ_ALWAYS block following the // BBJ_CALLFINALLY block, as well as, on x86, the final step block out of a // finally. BBF_CLONED_FINALLY_BEGIN = MAKE_BBFLAG(31), // First block of a cloned finally region BBF_CLONED_FINALLY_END = MAKE_BBFLAG(32), // Last block of a cloned finally region BBF_HAS_CALL = MAKE_BBFLAG(33), // BB contains a call BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY = MAKE_BBFLAG(34), // Block is dominated by exceptional entry. BBF_BACKWARD_JUMP_TARGET = MAKE_BBFLAG(35), // Block is a target of a backward jump BBF_PATCHPOINT = MAKE_BBFLAG(36), // Block is a patchpoint BBF_HAS_CLASS_PROFILE = MAKE_BBFLAG(37), // BB contains a call needing a class profile BBF_PARTIAL_COMPILATION_PATCHPOINT = MAKE_BBFLAG(38), // Block is a partial compilation patchpoint BBF_HAS_ALIGN = MAKE_BBFLAG(39), // BB ends with 'align' instruction BBF_TAILCALL_SUCCESSOR = MAKE_BBFLAG(40), // BB has pred that has potential tail call // The following are sets of flags. // Flags that relate blocks to loop structure. BBF_LOOP_FLAGS = BBF_LOOP_PREHEADER | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_LOOP_ALIGN, // Flags to update when two blocks are compacted BBF_COMPACT_UPD = BBF_CHANGED | BBF_GC_SAFE_POINT | BBF_HAS_JMP | BBF_HAS_IDX_LEN | BBF_BACKWARD_JUMP | BBF_HAS_NEWARRAY | \ BBF_HAS_NEWOBJ | BBF_HAS_NULLCHECK, // Flags a block should not have had before it is split. BBF_SPLIT_NONEXIST = BBF_CHANGED | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_RETLESS_CALL | BBF_LOOP_PREHEADER | BBF_COLD, // Flags lost by the top block when a block is split. // Note, this is a conservative guess. // For example, the top block might or might not have BBF_GC_SAFE_POINT, // but we assume it does not have BBF_GC_SAFE_POINT any more. BBF_SPLIT_LOST = BBF_GC_SAFE_POINT | BBF_HAS_JMP | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END, // Flags gained by the bottom block when a block is split. // Note, this is a conservative guess. // For example, the bottom block might or might not have BBF_HAS_NEWARRAY or BBF_HAS_NULLCHECK, // but we assume it has BBF_HAS_NEWARRAY and BBF_HAS_NULLCHECK. // TODO: Should BBF_RUN_RARELY be added to BBF_SPLIT_GAINED ? 
BBF_SPLIT_GAINED = BBF_DONT_REMOVE | BBF_HAS_JMP | BBF_BACKWARD_JUMP | BBF_HAS_IDX_LEN | BBF_HAS_NEWARRAY | BBF_PROF_WEIGHT | \ BBF_HAS_NEWOBJ | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END | BBF_HAS_NULLCHECK | BBF_HAS_CLASS_PROFILE, }; inline constexpr BasicBlockFlags operator ~(BasicBlockFlags a) { return (BasicBlockFlags)(~(unsigned __int64)a); } inline constexpr BasicBlockFlags operator |(BasicBlockFlags a, BasicBlockFlags b) { return (BasicBlockFlags)((unsigned __int64)a | (unsigned __int64)b); } inline constexpr BasicBlockFlags operator &(BasicBlockFlags a, BasicBlockFlags b) { return (BasicBlockFlags)((unsigned __int64)a & (unsigned __int64)b); } inline BasicBlockFlags& operator |=(BasicBlockFlags& a, BasicBlockFlags b) { return a = (BasicBlockFlags)((unsigned __int64)a | (unsigned __int64)b); } inline BasicBlockFlags& operator &=(BasicBlockFlags& a, BasicBlockFlags b) { return a = (BasicBlockFlags)((unsigned __int64)a & (unsigned __int64)b); } // clang-format on //------------------------------------------------------------------------ // BasicBlock: describes a basic block in the flowgraph. // // Note that this type derives from LIR::Range in order to make the LIR // utilities that are polymorphic over basic block and scratch ranges // faster and simpler. // struct BasicBlock : private LIR::Range { friend class LIR; BasicBlock* bbNext; // next BB in ascending PC offset order BasicBlock* bbPrev; void setNext(BasicBlock* next) { bbNext = next; if (next) { next->bbPrev = this; } } BasicBlockFlags bbFlags; static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_LOST) == 0); static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_GAINED) == 0); unsigned bbNum; // the block's number unsigned bbRefs; // number of blocks that can reach here, either by fall-through or a branch. If this falls to zero, // the block is unreachable. bool isRunRarely() const { return ((bbFlags & BBF_RUN_RARELY) != 0); } bool isLoopHead() const { return ((bbFlags & BBF_LOOP_HEAD) != 0); } bool isLoopAlign() const { return ((bbFlags & BBF_LOOP_ALIGN) != 0); } void unmarkLoopAlign(Compiler* comp DEBUG_ARG(const char* reason)); bool hasAlign() const { return ((bbFlags & BBF_HAS_ALIGN) != 0); } #ifdef DEBUG void dspFlags(); // Print the flags unsigned dspCheapPreds(); // Print the predecessors (bbCheapPreds) unsigned dspPreds(); // Print the predecessors (bbPreds) void dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH // regions are printed: see NumSucc() for details. void dspJumpKind(); // Print the block jump kind (e.g., BBJ_NONE, BBJ_COND, etc.). // Print a simple basic block header for various output, including a list of predecessors and successors. void dspBlockHeader(Compiler* compiler, bool showKind = true, bool showFlags = false, bool showPreds = true); const char* dspToString(int blockNumPadding = 0); #endif // DEBUG #define BB_UNITY_WEIGHT 100.0 // how much a normal execute once block weighs #define BB_UNITY_WEIGHT_UNSIGNED 100 // how much a normal execute once block weighs #define BB_LOOP_WEIGHT_SCALE 8.0 // synthetic profile scale factor for loops #define BB_ZERO_WEIGHT 0.0 #define BB_MAX_WEIGHT FLT_MAX // maximum finite weight -- needs rethinking. 
weight_t bbWeight; // The dynamic execution weight of this block // getCalledCount -- get the value used to normalize weights for this method static weight_t getCalledCount(Compiler* comp); // getBBWeight -- get the normalized weight of this block weight_t getBBWeight(Compiler* comp); // hasProfileWeight -- Returns true if this block's weight came from profile data bool hasProfileWeight() const { return ((this->bbFlags & BBF_PROF_WEIGHT) != 0); } // setBBProfileWeight -- Set the profile-derived weight for a basic block // and update the run rarely flag as appropriate. void setBBProfileWeight(weight_t weight) { this->bbFlags |= BBF_PROF_WEIGHT; this->bbWeight = weight; if (weight == BB_ZERO_WEIGHT) { this->bbFlags |= BBF_RUN_RARELY; } else { this->bbFlags &= ~BBF_RUN_RARELY; } } // this block will inherit the same weight and relevant bbFlags as bSrc // void inheritWeight(BasicBlock* bSrc) { inheritWeightPercentage(bSrc, 100); } // Similar to inheritWeight(), but we're splitting a block (such as creating blocks for qmark removal). // So, specify a percentage (0 to 100) of the weight the block should inherit. // // Can be invoked as a self-rescale, eg: block->inheritWeightPecentage(block, 50)) // void inheritWeightPercentage(BasicBlock* bSrc, unsigned percentage) { assert(0 <= percentage && percentage <= 100); this->bbWeight = (bSrc->bbWeight * percentage) / 100; if (bSrc->hasProfileWeight()) { this->bbFlags |= BBF_PROF_WEIGHT; } else { this->bbFlags &= ~BBF_PROF_WEIGHT; } if (this->bbWeight == BB_ZERO_WEIGHT) { this->bbFlags |= BBF_RUN_RARELY; } else { this->bbFlags &= ~BBF_RUN_RARELY; } } // Scale a blocks' weight by some factor. // void scaleBBWeight(weight_t scale) { this->bbWeight = this->bbWeight * scale; if (this->bbWeight == BB_ZERO_WEIGHT) { this->bbFlags |= BBF_RUN_RARELY; } else { this->bbFlags &= ~BBF_RUN_RARELY; } } // Set block weight to zero, and set run rarely flag. // void bbSetRunRarely() { this->scaleBBWeight(BB_ZERO_WEIGHT); } // makeBlockHot() // This is used to override any profiling data // and force a block to be in the hot region. // We only call this method for handler entry point // and only when HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION is 1. // Doing this helps fgReorderBlocks() by telling // it to try to move these blocks into the hot region. // Note that we do this strictly as an optimization, // not for correctness. fgDetermineFirstColdBlock() // will find all handler entry points and ensure that // for now we don't place them in the cold section. // void makeBlockHot() { if (this->bbWeight == BB_ZERO_WEIGHT) { this->bbFlags &= ~BBF_RUN_RARELY; // Clear any RarelyRun flag this->bbFlags &= ~BBF_PROF_WEIGHT; // Clear any profile-derived flag this->bbWeight = 1; } } bool isMaxBBWeight() const { return (bbWeight >= BB_MAX_WEIGHT); } // Returns "true" if the block is empty. Empty here means there are no statement // trees *except* PHI definitions. bool isEmpty() const; bool isValid() const; // Returns "true" iff "this" is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair -- // a block corresponding to an exit from the try of a try/finally. bool isBBCallAlwaysPair() const; // Returns "true" iff "this" is the last block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair -- // a block corresponding to an exit from the try of a try/finally. 
bool isBBCallAlwaysPairTail() const; BBjumpKinds bbJumpKind; // jump (if any) at the end of this block /* The following union describes the jump target(s) of this block */ union { unsigned bbJumpOffs; // PC offset (temporary only) BasicBlock* bbJumpDest; // basic block BBswtDesc* bbJumpSwt; // switch descriptor }; bool KindIs(BBjumpKinds kind) const { return bbJumpKind == kind; } template <typename... T> bool KindIs(BBjumpKinds kind, T... rest) const { return KindIs(kind) || KindIs(rest...); } // NumSucc() gives the number of successors, and GetSucc() returns a given numbered successor. // // There are two versions of these functions: ones that take a Compiler* and ones that don't. You must // always use a matching set. Thus, if you call NumSucc() without a Compiler*, you must also call // GetSucc() without a Compiler*. // // The behavior of NumSucc()/GetSucc() is different when passed a Compiler* for blocks that end in: // (1) BBJ_EHFINALLYRET (a return from a finally or fault block) // (2) BBJ_EHFILTERRET (a return from EH filter block) // (3) BBJ_SWITCH // // For BBJ_EHFINALLYRET, if no Compiler* is passed, then the block is considered to have no // successor. If Compiler* is passed, we figure out the actual successors. Some cases will want one behavior, // other cases the other. For example, IL verification requires that these blocks end in an empty operand // stack, and since the dataflow analysis of IL verification is concerned only with the contents of the // operand stack, we can consider the finally block to have no successors. But a more general dataflow // analysis that is tracking the contents of local variables might want to consider *all* successors, // and would pass the current Compiler object. // // Similarly, BBJ_EHFILTERRET blocks are assumed to have no successors if Compiler* is not passed; if // Compiler* is passed, NumSucc/GetSucc yields the first block of the try block's handler. // // For BBJ_SWITCH, if Compiler* is not passed, then all switch successors are returned. If Compiler* // is passed, then only unique switch successors are returned; the duplicate successors are omitted. // // Note that for BBJ_COND, which has two successors (fall through and condition true branch target), // only the unique targets are returned. Thus, if both targets are the same, NumSucc() will only return 1 // instead of 2. // // NumSucc: Returns the number of successors of "this". unsigned NumSucc() const; unsigned NumSucc(Compiler* comp); // GetSucc: Returns the "i"th successor. Requires (0 <= i < NumSucc()). BasicBlock* GetSucc(unsigned i) const; BasicBlock* GetSucc(unsigned i, Compiler* comp); // SwitchTargets: convenience methods for enabling range-based `for` iteration over a switch block's targets, e.g.: // for (BasicBlock* const bTarget : block->SwitchTargets()) ... // BBSwitchTargetList SwitchTargets() const { assert(bbJumpKind == BBJ_SWITCH); return BBSwitchTargetList(bbJumpSwt); } BasicBlock* GetUniquePred(Compiler* comp) const; BasicBlock* GetUniqueSucc() const; unsigned countOfInEdges() const { return bbRefs; } Statement* bbStmtList; GenTree* GetFirstLIRNode() const { return m_firstNode; } void SetFirstLIRNode(GenTree* tree) { m_firstNode = tree; } union { EntryState* bbEntryState; // verifier tracked state of all entries in stack. 
flowList* bbLastPred; // last pred list entry }; #define NO_BASE_TMP UINT_MAX // base# to use when we have none union { unsigned bbStkTempsIn; // base# for input stack temps int bbCountSchemaIndex; // schema index for count instrumentation }; union { unsigned bbStkTempsOut; // base# for output stack temps int bbClassSchemaIndex; // schema index for class instrumentation }; #define MAX_XCPTN_INDEX (USHRT_MAX - 1) // It would be nice to make bbTryIndex and bbHndIndex private, but there is still code that uses them directly, // especially Compiler::fgNewBBinRegion() and friends. // index, into the compHndBBtab table, of innermost 'try' clause containing the BB (used for raising exceptions). // Stored as index + 1; 0 means "no try index". unsigned short bbTryIndex; // index, into the compHndBBtab table, of innermost handler (filter, catch, fault/finally) containing the BB. // Stored as index + 1; 0 means "no handler index". unsigned short bbHndIndex; // Given two EH indices that are either bbTryIndex or bbHndIndex (or related), determine if index1 might be more // deeply nested than index2. Both index1 and index2 are in the range [0..compHndBBtabCount], where 0 means // "main function" and otherwise the value is an index into compHndBBtab[]. Note that "sibling" EH regions will // have a numeric index relationship that doesn't indicate nesting, whereas a more deeply nested region must have // a lower index than the region it is nested within. Note that if you compare a single block's bbTryIndex and // bbHndIndex, there is guaranteed to be a nesting relationship, since that block can't be simultaneously in two // sibling EH regions. In that case, "maybe" is actually "definitely". static bool ehIndexMaybeMoreNested(unsigned index1, unsigned index2) { if (index1 == 0) { // index1 is in the main method. It can't be more deeply nested than index2. return false; } else if (index2 == 0) { // index1 represents an EH region, whereas index2 is the main method. Thus, index1 is more deeply nested. assert(index1 > 0); return true; } else { // If index1 has a smaller index, it might be more deeply nested than index2. assert(index1 > 0); assert(index2 > 0); return index1 < index2; } } // catch type: class token of handler, or one of BBCT_*. Only set on first block of catch handler. 
unsigned bbCatchTyp; bool hasTryIndex() const { return bbTryIndex != 0; } bool hasHndIndex() const { return bbHndIndex != 0; } unsigned getTryIndex() const { assert(bbTryIndex != 0); return bbTryIndex - 1; } unsigned getHndIndex() const { assert(bbHndIndex != 0); return bbHndIndex - 1; } void setTryIndex(unsigned val) { bbTryIndex = (unsigned short)(val + 1); assert(bbTryIndex != 0); } void setHndIndex(unsigned val) { bbHndIndex = (unsigned short)(val + 1); assert(bbHndIndex != 0); } void clearTryIndex() { bbTryIndex = 0; } void clearHndIndex() { bbHndIndex = 0; } void copyEHRegion(const BasicBlock* from) { bbTryIndex = from->bbTryIndex; bbHndIndex = from->bbHndIndex; } void copyTryIndex(const BasicBlock* from) { bbTryIndex = from->bbTryIndex; } void copyHndIndex(const BasicBlock* from) { bbHndIndex = from->bbHndIndex; } static bool sameTryRegion(const BasicBlock* blk1, const BasicBlock* blk2) { return blk1->bbTryIndex == blk2->bbTryIndex; } static bool sameHndRegion(const BasicBlock* blk1, const BasicBlock* blk2) { return blk1->bbHndIndex == blk2->bbHndIndex; } static bool sameEHRegion(const BasicBlock* blk1, const BasicBlock* blk2) { return sameTryRegion(blk1, blk2) && sameHndRegion(blk1, blk2); } bool hasEHBoundaryIn() const; bool hasEHBoundaryOut() const; // Some non-zero value that will not collide with real tokens for bbCatchTyp #define BBCT_NONE 0x00000000 #define BBCT_FAULT 0xFFFFFFFC #define BBCT_FINALLY 0xFFFFFFFD #define BBCT_FILTER 0xFFFFFFFE #define BBCT_FILTER_HANDLER 0xFFFFFFFF #define handlerGetsXcptnObj(hndTyp) ((hndTyp) != BBCT_NONE && (hndTyp) != BBCT_FAULT && (hndTyp) != BBCT_FINALLY) // The following fields are used for loop detection typedef unsigned char loopNumber; static const unsigned NOT_IN_LOOP = UCHAR_MAX; static const unsigned MAX_LOOP_NUM = 64; loopNumber bbNatLoopNum; // Index, in optLoopTable, of most-nested loop that contains this block, // or else NOT_IN_LOOP if this block is not in a loop. // TODO-Cleanup: Get rid of bbStkDepth and use bbStackDepthOnEntry() instead union { unsigned short bbStkDepth; // stack depth on entry unsigned short bbFPinVars; // number of inner enregistered FP vars }; // Basic block predecessor lists. Early in compilation, some phases might need to compute "cheap" predecessor // lists. These are stored in bbCheapPreds, computed by fgComputeCheapPreds(). If bbCheapPreds is valid, // 'fgCheapPredsValid' will be 'true'. Later, the "full" predecessor lists are created by fgComputePreds(), stored // in 'bbPreds', and then maintained throughout compilation. 'fgComputePredsDone' will be 'true' after the // full predecessor lists are created. See the comment at fgComputeCheapPreds() to see how those differ from // the "full" variant. union { BasicBlockList* bbCheapPreds; // ptr to list of cheap predecessors (used before normal preds are computed) flowList* bbPreds; // ptr to list of predecessors }; // PredEdges: convenience method for enabling range-based `for` iteration over predecessor edges, e.g.: // for (flowList* const edge : block->PredEdges()) ... // PredEdgeList PredEdges() const { return PredEdgeList(bbPreds); } // PredBlocks: convenience method for enabling range-based `for` iteration over predecessor blocks, e.g.: // for (BasicBlock* const predBlock : block->PredBlocks()) ... 
// PredBlockList PredBlocks() const { return PredBlockList(bbPreds); } // Pred list maintenance // bool checkPredListOrder(); void ensurePredListOrder(Compiler* compiler); void reorderPredList(Compiler* compiler); BlockSet bbReach; // Set of all blocks that can reach this one union { BasicBlock* bbIDom; // Represent the closest dominator to this block (called the Immediate // Dominator) used to compute the dominance tree. void* bbSparseProbeList; // Used early on by fgInstrument void* bbSparseCountInfo; // Used early on by fgIncorporateEdgeCounts }; unsigned bbPostOrderNum; // the block's post order number in the graph. IL_OFFSET bbCodeOffs; // IL offset of the beginning of the block IL_OFFSET bbCodeOffsEnd; // IL offset past the end of the block. Thus, the [bbCodeOffs..bbCodeOffsEnd) // range is not inclusive of the end offset. The count of IL bytes in the block // is bbCodeOffsEnd - bbCodeOffs, assuming neither are BAD_IL_OFFSET. #ifdef DEBUG void dspBlockILRange() const; // Display the block's IL range as [XXX...YYY), where XXX and YYY might be "???" for // BAD_IL_OFFSET. #endif // DEBUG VARSET_TP bbVarUse; // variables used by block (before an assignment) VARSET_TP bbVarDef; // variables assigned by block (before a use) VARSET_TP bbLiveIn; // variables live on entry VARSET_TP bbLiveOut; // variables live on exit // Use, def, live in/out information for the implicit memory variable. MemoryKindSet bbMemoryUse : MemoryKindCount; // must be set for any MemoryKinds this block references MemoryKindSet bbMemoryDef : MemoryKindCount; // must be set for any MemoryKinds this block mutates MemoryKindSet bbMemoryLiveIn : MemoryKindCount; MemoryKindSet bbMemoryLiveOut : MemoryKindCount; MemoryKindSet bbMemoryHavoc : MemoryKindCount; // If true, at some point the block does an operation // that leaves memory in an unknown state. (E.g., // unanalyzed call, store through unknown pointer...) // We want to make phi functions for the special implicit var memory. But since this is not a real // lclVar, and thus has no local #, we can't use a GenTreePhiArg. Instead, we use this struct. struct MemoryPhiArg { unsigned m_ssaNum; // SSA# for incoming value. MemoryPhiArg* m_nextArg; // Next arg in the list, else NULL. unsigned GetSsaNum() { return m_ssaNum; } MemoryPhiArg(unsigned ssaNum, MemoryPhiArg* nextArg = nullptr) : m_ssaNum(ssaNum), m_nextArg(nextArg) { } void* operator new(size_t sz, class Compiler* comp); }; static MemoryPhiArg* EmptyMemoryPhiDef; // Special value (0x1, FWIW) to represent a to-be-filled in Phi arg list // for Heap. MemoryPhiArg* bbMemorySsaPhiFunc[MemoryKindCount]; // If the "in" Heap SSA var is not a phi definition, this value // is NULL. // Otherwise, it is either the special value EmptyMemoryPhiDefn, to indicate // that Heap needs a phi definition on entry, or else it is the linked list // of the phi arguments. unsigned bbMemorySsaNumIn[MemoryKindCount]; // The SSA # of memory on entry to the block. unsigned bbMemorySsaNumOut[MemoryKindCount]; // The SSA # of memory on exit from the block. VARSET_TP bbScope; // variables in scope over the block void InitVarSets(class Compiler* comp); /* The following are the standard bit sets for dataflow analysis. * We perform CSE and range-checks at the same time * and assertion propagation separately, * thus we can union them since the two operations are completely disjunct. 
*/ union { EXPSET_TP bbCseGen; // CSEs computed by block ASSERT_TP bbAssertionGen; // value assignments computed by block }; union { EXPSET_TP bbCseIn; // CSEs available on entry ASSERT_TP bbAssertionIn; // value assignments available on entry }; union { EXPSET_TP bbCseOut; // CSEs available on exit ASSERT_TP bbAssertionOut; // value assignments available on exit }; void* bbEmitCookie; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) void* bbUnwindNopEmitCookie; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef VERIFIER stackDesc bbStackIn; // stack descriptor for input stackDesc bbStackOut; // stack descriptor for output verTypeVal* bbTypesIn; // list of variable types on input verTypeVal* bbTypesOut; // list of variable types on output #endif // VERIFIER //------------------------------------------------------------------------- #if MEASURE_BLOCK_SIZE static size_t s_Size; static size_t s_Count; #endif // MEASURE_BLOCK_SIZE bool bbFallsThrough() const; // Our slop fraction is 1/128 of the block weight rounded off static weight_t GetSlopFraction(weight_t weightBlk) { return ((weightBlk + 64) / 128); } // Given an the edge b1 -> b2, calculate the slop fraction by // using the higher of the two block weights static weight_t GetSlopFraction(BasicBlock* b1, BasicBlock* b2) { return GetSlopFraction(max(b1->bbWeight, b2->bbWeight)); } #ifdef DEBUG unsigned bbTgtStkDepth; // Native stack depth on entry (for throw-blocks) static unsigned s_nMaxTrees; // The max # of tree nodes in any BB // This is used in integrity checks. We semi-randomly pick a traversal stamp, label all blocks // in the BB list with that stamp (in this field); then we can tell if (e.g.) predecessors are // still in the BB list by whether they have the same stamp (with high probability). unsigned bbTraversalStamp; // bbID is a unique block identifier number that does not change: it does not get renumbered, like bbNum. unsigned bbID; #endif // DEBUG ThisInitState bbThisOnEntry() const; unsigned bbStackDepthOnEntry() const; void bbSetStack(void* stackBuffer); StackEntry* bbStackOnEntry() const; // "bbNum" is one-based (for unknown reasons); it is sometimes useful to have the corresponding // zero-based number for use as an array index. unsigned bbInd() const { assert(bbNum > 0); return bbNum - 1; } Statement* firstStmt() const; Statement* lastStmt() const; // Statements: convenience method for enabling range-based `for` iteration over the statement list, e.g.: // for (Statement* const stmt : block->Statements()) // StatementList Statements() const { return StatementList(firstStmt()); } // NonPhiStatements: convenience method for enabling range-based `for` iteration over the statement list, // excluding any initial PHI statements, e.g.: // for (Statement* const stmt : block->NonPhiStatements()) // StatementList NonPhiStatements() const { return StatementList(FirstNonPhiDef()); } GenTree* lastNode() const; bool endsWithJmpMethod(Compiler* comp) const; bool endsWithTailCall(Compiler* comp, bool fastTailCallsOnly, bool tailCallsConvertibleToLoopOnly, GenTree** tailCall) const; bool endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly = false) const; bool endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall) const; // Returns the first statement in the statement list of "this" that is // not an SSA definition (a lcl = phi(...) assignment). 
Statement* FirstNonPhiDef() const; Statement* FirstNonPhiDefOrCatchArgAsg() const; BasicBlock() : bbStmtList(nullptr), bbLiveIn(VarSetOps::UninitVal()), bbLiveOut(VarSetOps::UninitVal()) { } // Iteratable collection of successors of a block. template <typename TPosition> class Successors { Compiler* m_comp; BasicBlock* m_block; public: Successors(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) { } class iterator { Compiler* m_comp; BasicBlock* m_block; TPosition m_pos; public: iterator(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block), m_pos(comp, block) { } iterator() : m_pos() { } void operator++(void) { m_pos.Advance(m_comp, m_block); } BasicBlock* operator*() { return m_pos.Current(m_comp, m_block); } bool operator==(const iterator& other) { return m_pos == other.m_pos; } bool operator!=(const iterator& other) { return m_pos != other.m_pos; } }; iterator begin() { return iterator(m_comp, m_block); } iterator end() { return iterator(); } }; Successors<EHSuccessorIterPosition> GetEHSuccs(Compiler* comp) { return Successors<EHSuccessorIterPosition>(comp, this); } Successors<AllSuccessorIterPosition> GetAllSuccs(Compiler* comp) { return Successors<AllSuccessorIterPosition>(comp, this); } // BBSuccList: adapter class for forward iteration of block successors, using range-based `for`, // normally used via BasicBlock::Succs(), e.g.: // for (BasicBlock* const target : block->Succs()) ... // class BBSuccList { // For one or two successors, pre-compute and stash the successors inline, in m_succs[], so we don't // need to call a function or execute another `switch` to get them. Also, pre-compute the begin and end // points of the iteration, for use by BBArrayIterator. `m_begin` and `m_end` will either point at // `m_succs` or at the switch table successor array. BasicBlock* m_succs[2]; BasicBlock* const* m_begin; BasicBlock* const* m_end; public: BBSuccList(const BasicBlock* block); BBArrayIterator begin() const; BBArrayIterator end() const; }; // BBCompilerSuccList: adapter class for forward iteration of block successors, using range-based `for`, // normally used via BasicBlock::Succs(), e.g.: // for (BasicBlock* const target : block->Succs(compiler)) ... // // This version uses NumSucc(Compiler*)/GetSucc(Compiler*). See the documentation there for the explanation // of the implications of this versus the version that does not take `Compiler*`. class BBCompilerSuccList { Compiler* m_comp; BasicBlock* m_block; // iterator: forward iterator for an array of BasicBlock*, such as the BBswtDesc->bbsDstTab. // class iterator { Compiler* m_comp; BasicBlock* m_block; unsigned m_succNum; public: iterator(Compiler* comp, BasicBlock* block, unsigned succNum) : m_comp(comp), m_block(block), m_succNum(succNum) { } BasicBlock* operator*() const { assert(m_block != nullptr); BasicBlock* bTarget = m_block->GetSucc(m_succNum, m_comp); assert(bTarget != nullptr); return bTarget; } iterator& operator++() { ++m_succNum; return *this; } bool operator!=(const iterator& i) const { return m_succNum != i.m_succNum; } }; public: BBCompilerSuccList(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) { } iterator begin() const { return iterator(m_comp, m_block, 0); } iterator end() const { return iterator(m_comp, m_block, m_block->NumSucc(m_comp)); } }; // Succs: convenience methods for enabling range-based `for` iteration over a block's successors, e.g.: // for (BasicBlock* const succ : block->Succs()) ... 
// // There are two options: one that takes a Compiler* and one that doesn't. These correspond to the // NumSucc()/GetSucc() functions that do or do not take a Compiler*. See the comment for NumSucc()/GetSucc() // for the distinction. BBSuccList Succs() const { return BBSuccList(this); } BBCompilerSuccList Succs(Compiler* comp) { return BBCompilerSuccList(comp, this); } // Try to clone block state and statements from `from` block to `to` block (which must be new/empty), // optionally replacing uses of local `varNum` with IntCns `varVal`. Return true if all statements // in the block are cloned successfully, false (with partially-populated `to` block) if one fails. static bool CloneBlockState( Compiler* compiler, BasicBlock* to, const BasicBlock* from, unsigned varNum = (unsigned)-1, int varVal = 0); void MakeLIR(GenTree* firstNode, GenTree* lastNode); bool IsLIR() const; void SetDominatedByExceptionalEntryFlag() { bbFlags |= BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY; } bool IsDominatedByExceptionalEntryFlag() const { return (bbFlags & BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY) != 0; } #ifdef DEBUG bool Contains(const GenTree* node) const { assert(IsLIR()); for (Iterator iter = begin(); iter != end(); ++iter) { if (*iter == node) { return true; } } return false; } #endif // DEBUG }; template <> struct JitPtrKeyFuncs<BasicBlock> : public JitKeyFuncsDefEquals<const BasicBlock*> { public: // Make sure hashing is deterministic and not on "ptr." static unsigned GetHashCode(const BasicBlock* ptr); }; // A set of blocks. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, bool> BlkSet; // A vector of blocks. typedef jitstd::vector<BasicBlock*> BlkVector; // A map of block -> set of blocks, can be used as sparse block trees. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BlkSet*> BlkToBlkSetMap; // A map of block -> vector of blocks, can be used as sparse block trees. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BlkVector> BlkToBlkVectorMap; // Map from Block to Block. Used for a variety of purposes. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BasicBlock*> BlockToBlockMap; // BasicBlockIterator: forward iterator for the BasicBlock linked list. // It is allowed to make changes to the BasicBlock list as long as the current block remains in the list. // E.g., the current block `m_bbNext` pointer can be altered (such as when inserting a following block), // as long as the current block is still in the list. // The block list is expected to be properly doubly-linked. // class BasicBlockIterator { BasicBlock* m_block; public: BasicBlockIterator(BasicBlock* block) : m_block(block) { } BasicBlock* operator*() const { return m_block; } BasicBlockIterator& operator++() { assert(m_block != nullptr); // Check that we haven't been spliced out of the list. assert((m_block->bbNext == nullptr) || (m_block->bbNext->bbPrev == m_block)); assert((m_block->bbPrev == nullptr) || (m_block->bbPrev->bbNext == m_block)); m_block = m_block->bbNext; return *this; } bool operator!=(const BasicBlockIterator& i) const { return m_block != i.m_block; } }; // BasicBlockSimpleList: adapter class for forward iteration of a lexically contiguous range of // BasicBlock, starting at `begin` and going to the end of the function, using range-based `for`, // normally used via Compiler::Blocks(), e.g.: // for (BasicBlock* const block : Blocks()) ... 
// class BasicBlockSimpleList { BasicBlock* m_begin; public: BasicBlockSimpleList(BasicBlock* begin) : m_begin(begin) { } BasicBlockIterator begin() const { return BasicBlockIterator(m_begin); } BasicBlockIterator end() const { return BasicBlockIterator(nullptr); } }; // BasicBlockRangeList: adapter class for forward iteration of a lexically contiguous range of // BasicBlock specified with both `begin` and `end` blocks. `begin` and `end` are *inclusive* // and must be non-null. E.g., // for (BasicBlock* const block : BasicBlockRangeList(startBlock, endBlock)) ... // // Note that endBlock->bbNext is captured at the beginning of the iteration. Thus, any blocks // inserted before that will continue the iteration. In particular, inserting blocks between endBlock // and endBlock->bbNext will yield unexpected results, as the iteration will continue longer than desired. // class BasicBlockRangeList { BasicBlock* m_begin; BasicBlock* m_end; public: BasicBlockRangeList(BasicBlock* begin, BasicBlock* end) : m_begin(begin), m_end(end) { assert(begin != nullptr); assert(end != nullptr); } BasicBlockIterator begin() const { return BasicBlockIterator(m_begin); } BasicBlockIterator end() const { return BasicBlockIterator(m_end->bbNext); // walk until we see the block *following* the `m_end` block } }; // BBswtDesc -- descriptor for a switch block // // Things to know: // 1. If bbsHasDefault is true, the default case is the last one in the array of basic block addresses // namely bbsDstTab[bbsCount - 1]. // 2. bbsCount must be at least 1, for the default case. bbsCount cannot be zero. It appears that the ECMA spec // allows for a degenerate switch with zero cases. Normally, the optimizer will optimize degenerate // switches with just a default case to a BBJ_ALWAYS branch, and a switch with just two cases to a BBJ_COND. // However, in debuggable code, we might not do that, so bbsCount might be 1. // struct BBswtDesc { BasicBlock** bbsDstTab; // case label table address unsigned bbsCount; // count of cases (includes 'default' if bbsHasDefault) // Case number and likelihood of most likely case // (only known with PGO, only valid if bbsHasDominantCase is true) unsigned bbsDominantCase; weight_t bbsDominantFraction; bool bbsHasDefault; // true if last switch case is a default case bool bbsHasDominantCase; // true if switch has a dominant case BBswtDesc() : bbsHasDefault(true), bbsHasDominantCase(false) { } BBswtDesc(Compiler* comp, const BBswtDesc* other); void removeDefault() { assert(bbsHasDefault); assert(bbsCount > 0); bbsHasDefault = false; bbsCount--; } BasicBlock* getDefault() { assert(bbsHasDefault); assert(bbsCount > 0); return bbsDstTab[bbsCount - 1]; } }; // BBSwitchTargetList out-of-class-declaration implementations (here due to C++ ordering requirements). // inline BBSwitchTargetList::BBSwitchTargetList(BBswtDesc* bbsDesc) : m_bbsDesc(bbsDesc) { assert(m_bbsDesc != nullptr); assert(m_bbsDesc->bbsDstTab != nullptr); } inline BBArrayIterator BBSwitchTargetList::begin() const { return BBArrayIterator(m_bbsDesc->bbsDstTab); } inline BBArrayIterator BBSwitchTargetList::end() const { return BBArrayIterator(m_bbsDesc->bbsDstTab + m_bbsDesc->bbsCount); } // BBSuccList out-of-class-declaration implementations // inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) { assert(block != nullptr); switch (block->bbJumpKind) { case BBJ_THROW: case BBJ_RETURN: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: // We don't need m_succs. 
            m_begin = nullptr;
            m_end   = nullptr;
            break;

        case BBJ_CALLFINALLY:
        case BBJ_ALWAYS:
        case BBJ_EHCATCHRET:
        case BBJ_LEAVE:
            m_succs[0] = block->bbJumpDest;
            m_begin    = &m_succs[0];
            m_end      = &m_succs[1];
            break;

        case BBJ_NONE:
            m_succs[0] = block->bbNext;
            m_begin    = &m_succs[0];
            m_end      = &m_succs[1];
            break;

        case BBJ_COND:
            m_succs[0] = block->bbNext;
            m_begin    = &m_succs[0];

            // If both fall-through and branch successors are identical, then only include
            // them once in the iteration (this is the same behavior as NumSucc()/GetSucc()).
            if (block->bbJumpDest == block->bbNext)
            {
                m_end = &m_succs[1];
            }
            else
            {
                m_succs[1] = block->bbJumpDest;
                m_end      = &m_succs[2];
            }
            break;

        case BBJ_SWITCH:
            // We don't use the m_succs in-line data for switches; use the existing jump table in the block.
            assert(block->bbJumpSwt != nullptr);
            assert(block->bbJumpSwt->bbsDstTab != nullptr);
            m_begin = block->bbJumpSwt->bbsDstTab;
            m_end   = block->bbJumpSwt->bbsDstTab + block->bbJumpSwt->bbsCount;
            break;

        default:
            unreached();
    }

    assert(m_end >= m_begin);
}

inline BBArrayIterator BasicBlock::BBSuccList::begin() const
{
    return BBArrayIterator(m_begin);
}

inline BBArrayIterator BasicBlock::BBSuccList::end() const
{
    return BBArrayIterator(m_end);
}

// In compiler terminology the control flow between two BasicBlocks
// is typically referred to as an "edge". Most well known are the
// backward branches for loops, which are often called "back-edges".
//
// "struct flowList" is the type that represents our control flow edges.
// This type is a linked list of zero or more "edges".
// (The list of zero edges is represented by NULL.)
// Every BasicBlock has a field called bbPreds of this type. This field
// represents the list of "edges" that flow into this BasicBlock.
// The flowList type only stores the BasicBlock* of the source for the
// control flow edge. The destination block for the control flow edge
// is implied to be the block which contained the bbPreds field.
//
// For a switch branch target there may be multiple "edges" that have
// the same source block (and destination block). We need to count the
// number of these edges so that during optimization we will know when
// we have zero of them. Rather than have extra flowList entries we
// increment the flDupCount field.
//
// When we have profile weights for the BasicBlocks we can usually compute
// the number of times each edge was executed by examining the adjacent
// BasicBlock weights. As we are doing for BasicBlocks, we call the number
// of times that a control flow edge was executed the "edge weight".
// In order to compute the edge weights we need to use a bounded range
// for every edge weight. These two fields, 'flEdgeWeightMin' and 'flEdgeWeightMax',
// are used to hold a bounded range. Most often these will converge such
// that both values are the same and that value is the exact edge weight.
// Sometimes we are left with a range of possible values between [Min..Max]
// which represents an inexact edge weight.
//
// The bbPreds list is initially created by Compiler::fgComputePreds()
// and is incrementally kept up to date.
//
// The edge weights are computed by Compiler::fgComputeEdgeWeights();
// the edge weights are then used to straighten conditional branches
// by Compiler::fgReorderBlocks().
//
// We have a simpler struct, BasicBlockList, which is simply a singly-linked
// list of blocks. This is used for various purposes, but one is as a "cheap"
// predecessor list, computed by fgComputeCheapPreds(), and stored as a list
// on BasicBlock pointed to by bbCheapPreds.
struct BasicBlockList
{
    BasicBlockList* next;  // The next entry in the list, nullptr for end of list.
    BasicBlock*     block; // The BasicBlock of interest.

    BasicBlockList() : next(nullptr), block(nullptr)
    {
    }

    BasicBlockList(BasicBlock* blk, BasicBlockList* rest) : next(rest), block(blk)
    {
    }
};

// flowList -- control flow edge
//
struct flowList
{
public:
    flowList* flNext; // The next flowList (predecessor edge) in the list, nullptr for end of list.

private:
    BasicBlock* m_block; // The BasicBlock of interest.
    weight_t    flEdgeWeightMin;
    weight_t    flEdgeWeightMax;

public:
    unsigned flDupCount; // The count of duplicate "edges" (use only for switch stmts)

public:
    BasicBlock* getBlock() const
    {
        return m_block;
    }

    void setBlock(BasicBlock* newBlock)
    {
        m_block = newBlock;
    }

    weight_t edgeWeightMin() const
    {
        return flEdgeWeightMin;
    }

    weight_t edgeWeightMax() const
    {
        return flEdgeWeightMax;
    }

    // These two methods are used to set new values for flEdgeWeightMin and flEdgeWeightMax;
    // they are used only during the computation of the edge weights.
    // They return false if the newWeight is not between the current [min..max].
    // When slop is non-zero we allow for the case where our weights might be off by 'slop'.
    //
    bool setEdgeWeightMinChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop);
    bool setEdgeWeightMaxChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop);
    void setEdgeWeights(weight_t newMinWeight, weight_t newMaxWeight, BasicBlock* bDst);

    flowList(BasicBlock* block, flowList* rest)
        : flNext(rest), m_block(block), flEdgeWeightMin(0), flEdgeWeightMax(0), flDupCount(0)
    {
    }
};

// Pred list iterator implementations (that are required to be defined after the declaration of BasicBlock
// and flowList)

inline PredEdgeList::iterator::iterator(flowList* pred) : m_pred(pred)
{
#ifdef DEBUG
    m_next = (m_pred == nullptr) ? nullptr : m_pred->flNext;
#endif
}

inline PredEdgeList::iterator& PredEdgeList::iterator::operator++()
{
    flowList* next = m_pred->flNext;

#ifdef DEBUG
    // Check that the next edge is the one we expect to see.
    assert(next == m_next);
    m_next = (next == nullptr) ? nullptr : next->flNext;
#endif // DEBUG

    m_pred = next;
    return *this;
}

inline PredBlockList::iterator::iterator(flowList* pred) : m_pred(pred)
{
#ifdef DEBUG
    m_next = (m_pred == nullptr) ? nullptr : m_pred->flNext;
#endif
}

inline BasicBlock* PredBlockList::iterator::operator*() const
{
    return m_pred->getBlock();
}

inline PredBlockList::iterator& PredBlockList::iterator::operator++()
{
    flowList* next = m_pred->flNext;

#ifdef DEBUG
    // Check that the next edge is the one we expect to see.
    assert(next == m_next);
    m_next = (next == nullptr) ? nullptr : next->flNext;
#endif // DEBUG

    m_pred = next;
    return *this;
}

// This enum represents a pre/post-visit action state to emulate a depth-first
// spanning tree traversal of a tree or graph.
enum DfsStackState
{
    DSS_Invalid, // The initialized, invalid error state
    DSS_Pre,     // The DFS pre-order (first visit) traversal state
    DSS_Post     // The DFS post-order (last visit) traversal state
};

// These structs represent an entry in a stack used to emulate a non-recursive
// depth-first spanning tree traversal of a graph. The entry contains either a
// block pointer or a block number depending on which is more useful.
struct DfsBlockEntry { DfsStackState dfsStackState; // The pre/post traversal action for this entry BasicBlock* dfsBlock; // The corresponding block for the action DfsBlockEntry(DfsStackState state, BasicBlock* basicBlock) : dfsStackState(state), dfsBlock(basicBlock) { } }; /***************************************************************************** * * The following call-backs supplied by the client; it's used by the code * emitter to convert a basic block to its corresponding emitter cookie. */ void* emitCodeGetCookie(BasicBlock* block); AllSuccessorIterPosition::AllSuccessorIterPosition(Compiler* comp, BasicBlock* block) : m_numNormSuccs(block->NumSucc(comp)), m_remainingNormSucc(m_numNormSuccs), m_ehIter(comp, block) { if (CurTryIsBlkCallFinallyTarget(comp, block)) { m_ehIter.Advance(comp, block); } } bool AllSuccessorIterPosition::CurTryIsBlkCallFinallyTarget(Compiler* comp, BasicBlock* block) { return (block->bbJumpKind == BBJ_CALLFINALLY) && (m_ehIter != EHSuccessorIterPosition()) && (block->bbJumpDest == m_ehIter.Current(comp, block)); } void AllSuccessorIterPosition::Advance(Compiler* comp, BasicBlock* block) { if (m_remainingNormSucc > 0) { m_remainingNormSucc--; } else { m_ehIter.Advance(comp, block); // If the original block whose successors we're iterating over // is a BBJ_CALLFINALLY, that finally clause's first block // will be yielded as a normal successor. Don't also yield as // an exceptional successor. if (CurTryIsBlkCallFinallyTarget(comp, block)) { m_ehIter.Advance(comp, block); } } } // Requires that "this" is not equal to the standard "end" iterator. Returns the // current successor. BasicBlock* AllSuccessorIterPosition::Current(Compiler* comp, BasicBlock* block) { if (m_remainingNormSucc > 0) { return block->GetSucc(m_numNormSuccs - m_remainingNormSucc, comp); } else { return m_ehIter.Current(comp, block); } } typedef BasicBlock::Successors<EHSuccessorIterPosition>::iterator EHSuccessorIter; typedef BasicBlock::Successors<AllSuccessorIterPosition>::iterator AllSuccessorIter; // An enumerator of a block's all successors. In some cases (e.g. SsaBuilder::TopologicalSort) // using iterators is not exactly efficient, at least because they contain an unnecessary // member - a pointer to the Compiler object. class AllSuccessorEnumerator { BasicBlock* m_block; AllSuccessorIterPosition m_pos; public: // Constructs an enumerator of all `block`'s successors. AllSuccessorEnumerator(Compiler* comp, BasicBlock* block) : m_block(block), m_pos(comp, block) { } // Gets the block whose successors are enumerated. BasicBlock* Block() { return m_block; } // Returns true if the next successor is an EH successor. bool IsNextEHSuccessor() { return m_pos.IsCurrentEH(); } // Returns the next available successor or `nullptr` if there are no more successors. BasicBlock* NextSuccessor(Compiler* comp) { if (!m_pos.HasCurrent()) { return nullptr; } BasicBlock* succ = m_pos.Current(comp, m_block); m_pos.Advance(comp, m_block); return succ; } }; // Simple dominator tree node that keeps track of a node's first child and next sibling. // The parent is provided by BasicBlock::bbIDom. struct DomTreeNode { BasicBlock* firstChild; BasicBlock* nextSibling; }; /*****************************************************************************/ #endif // _BLOCK_H_ /*****************************************************************************/
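// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header). The adapters declared
// above, BasicBlock::Succs(), BasicBlock::PredEdges(), and BasicBlock::PredBlocks(),
// are intended for range-based `for` iteration. The helper below shows one way a
// debug-style dump might walk both the successor list and the predecessor edge
// list, including each edge's bounded weight range and duplicate count. The
// function name `dumpBlockNeighbors` and the direct use of <cstdio> are
// assumptions for illustration only, not existing JIT code.
// ---------------------------------------------------------------------------
#include <cstdio>

inline void dumpBlockNeighbors(BasicBlock* block)
{
    // Walk the successors implied by the block's jump kind. The no-argument
    // Succs() form does not deduplicate switch targets and treats
    // BBJ_EHFINALLYRET/BBJ_EHFILTERRET blocks as having no successors.
    for (BasicBlock* const succ : block->Succs())
    {
        printf(FMT_BB " -> " FMT_BB "\n", block->bbNum, succ->bbNum);
    }

    // Walk the predecessor edges (flowList entries), showing the bounded
    // [min..max] edge weight and the duplicate count used for switch edges.
    for (flowList* const edge : block->PredEdges())
    {
        printf(FMT_BB " <- " FMT_BB " [" FMT_WT ".." FMT_WT "] dup=%u\n", block->bbNum, edge->getBlock()->bbNum,
               edge->edgeWeightMin(), edge->edgeWeightMax(), edge->flDupCount);
    }
}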
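// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header). BBswtDesc stores the
// switch targets in bbsDstTab; when bbsHasDefault is true the default case is
// always the last entry, bbsDstTab[bbsCount - 1], which is what getDefault()
// returns and what removeDefault() drops by decrementing bbsCount. The helper
// name `countNonDefaultSwitchCases` is an assumption for illustration only.
// ---------------------------------------------------------------------------
inline unsigned countNonDefaultSwitchCases(BasicBlock* block)
{
    assert(block->bbJumpKind == BBJ_SWITCH);

    BBswtDesc* const swtDesc = block->bbJumpSwt;

    unsigned count = swtDesc->bbsCount;
    if (swtDesc->bbsHasDefault)
    {
        // The default target is the trailing entry of the jump table.
        assert(swtDesc->getDefault() == swtDesc->bbsDstTab[swtDesc->bbsCount - 1]);
        count--;
    }
    return count;
}

// The same jump table can also be walked with the range-based adapter, e.g.:
//     for (BasicBlock* const target : block->SwitchTargets()) ...
// which yields every entry of bbsDstTab, including the default (if any) and any
// duplicate targets.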
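// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header). GetSlopFraction() above
// defines the tolerance ("slop") used when reconciling edge weights against block
// weights: roughly 1/128 of the larger of the two block weights. For example,
// with block weights of 1000 and 996 the slop is (1000 + 64) / 128, about 8.3,
// so those two weights would be considered to agree. The helper name
// `weightsAgreeWithinSlop` is an assumption for illustration only.
// ---------------------------------------------------------------------------
inline bool weightsAgreeWithinSlop(BasicBlock* b1, BasicBlock* b2, weight_t w1, weight_t w2)
{
    // Slop is derived from the heavier endpoint of the edge b1 -> b2.
    weight_t slop = BasicBlock::GetSlopFraction(b1, b2);
    weight_t diff = (w1 > w2) ? (w1 - w2) : (w2 - w1);
    return diff <= slop;
}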
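// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original header). bbTryIndex and
// bbHndIndex are stored as (EH table index + 1), with 0 meaning "not in a
// try/handler"; getTryIndex()/getHndIndex() remove that bias, while
// ehIndexMaybeMoreNested() compares the biased values directly. The helper
// name `blockMaybeInDeeperTryThan` is an assumption for illustration only.
// ---------------------------------------------------------------------------
inline bool blockMaybeInDeeperTryThan(const BasicBlock* blk1, const BasicBlock* blk2)
{
    if (!blk1->hasTryIndex())
    {
        // blk1 is in the main method body, so it cannot be more deeply nested than blk2.
        return false;
    }

    // Pass the biased (index + 1) values, as ehIndexMaybeMoreNested() expects;
    // 0 means "main function" on either side.
    return BasicBlock::ehIndexMaybeMoreNested(blk1->bbTryIndex, blk2->bbTryIndex);
}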
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX BasicBlock XX XX XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _BLOCK_H_ #define _BLOCK_H_ /*****************************************************************************/ #include "vartype.h" // For "var_types.h" #include "_typeinfo.h" /*****************************************************************************/ // Defines VARSET_TP #include "varset.h" #include "blockset.h" #include "jitstd.h" #include "bitvec.h" #include "jithashtable.h" /*****************************************************************************/ typedef BitVec EXPSET_TP; typedef BitVec_ValArg_T EXPSET_VALARG_TP; typedef BitVec_ValRet_T EXPSET_VALRET_TP; #define EXPSET_SZ 64 typedef BitVec ASSERT_TP; typedef BitVec_ValArg_T ASSERT_VALARG_TP; typedef BitVec_ValRet_T ASSERT_VALRET_TP; // We use the following format when printing the BasicBlock number: bbNum // This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int) #define FMT_BB "BB%02u" // Use this format for loop table indices. #define FMT_LP "L%02u" // And this format for profile weights #define FMT_WT "%.7g" /***************************************************************************** * * Each basic block ends with a jump which is described as a value * of the following enumeration. */ // clang-format off enum BBjumpKinds : BYTE { BBJ_EHFINALLYRET,// block ends with 'endfinally' (for finally or fault) BBJ_EHFILTERRET, // block ends with 'endfilter' BBJ_EHCATCHRET, // block ends with a leave out of a catch (only #if defined(FEATURE_EH_FUNCLETS)) BBJ_THROW, // block ends with 'throw' BBJ_RETURN, // block ends with 'ret' BBJ_NONE, // block flows into the next one (no jump) BBJ_ALWAYS, // block always jumps to the target BBJ_LEAVE, // block always jumps to the target, maybe out of guarded region. Only used until importing. BBJ_CALLFINALLY, // block always calls the target finally BBJ_COND, // block conditionally jumps to the target BBJ_SWITCH, // block ends with a switch statement BBJ_COUNT }; // clang-format on struct GenTree; struct Statement; struct BasicBlock; class Compiler; class typeInfo; struct BasicBlockList; struct flowList; struct EHblkDsc; struct BBswtDesc; struct StackEntry { GenTree* val; typeInfo seTypeInfo; }; /*****************************************************************************/ enum ThisInitState { TIS_Bottom, // We don't know anything about the 'this' pointer. TIS_Uninit, // The 'this' pointer for this constructor is known to be uninitialized. TIS_Init, // The 'this' pointer for this constructor is known to be initialized. TIS_Top, // This results from merging the state of two blocks one with TIS_Unint and the other with TIS_Init. // We use this in fault blocks to prevent us from accessing the 'this' pointer, but otherwise // allowing the fault block to generate code. }; struct EntryState { ThisInitState thisInitialized; // used to track whether the this ptr is initialized. 
unsigned esStackDepth; // size of esStack StackEntry* esStack; // ptr to stack }; // Enumeration of the kinds of memory whose state changes the compiler tracks enum MemoryKind { ByrefExposed = 0, // Includes anything byrefs can read/write (everything in GcHeap, address-taken locals, // unmanaged heap, callers' locals, etc.) GcHeap, // Includes actual GC heap, and also static fields MemoryKindCount, // Number of MemoryKinds }; #ifdef DEBUG const char* const memoryKindNames[] = {"ByrefExposed", "GcHeap"}; #endif // DEBUG // Bitmask describing a set of memory kinds (usable in bitfields) typedef unsigned int MemoryKindSet; // Bitmask for a MemoryKindSet containing just the specified MemoryKind inline MemoryKindSet memoryKindSet(MemoryKind memoryKind) { return (1U << memoryKind); } // Bitmask for a MemoryKindSet containing the specified MemoryKinds template <typename... MemoryKinds> inline MemoryKindSet memoryKindSet(MemoryKind memoryKind, MemoryKinds... memoryKinds) { return memoryKindSet(memoryKind) | memoryKindSet(memoryKinds...); } // Bitmask containing all the MemoryKinds const MemoryKindSet fullMemoryKindSet = (1 << MemoryKindCount) - 1; // Bitmask containing no MemoryKinds const MemoryKindSet emptyMemoryKindSet = 0; // Standard iterator class for iterating through MemoryKinds class MemoryKindIterator { int value; public: explicit inline MemoryKindIterator(int val) : value(val) { } inline MemoryKindIterator& operator++() { ++value; return *this; } inline MemoryKindIterator operator++(int) { return MemoryKindIterator(value++); } inline MemoryKind operator*() { return static_cast<MemoryKind>(value); } friend bool operator==(const MemoryKindIterator& left, const MemoryKindIterator& right) { return left.value == right.value; } friend bool operator!=(const MemoryKindIterator& left, const MemoryKindIterator& right) { return left.value != right.value; } }; // Empty struct that allows enumerating memory kinds via `for(MemoryKind kind : allMemoryKinds())` struct allMemoryKinds { inline allMemoryKinds() { } inline MemoryKindIterator begin() { return MemoryKindIterator(0); } inline MemoryKindIterator end() { return MemoryKindIterator(MemoryKindCount); } }; // This encapsulates the "exception handling" successors of a block. That is, // if a basic block BB1 occurs in a try block, we consider the first basic block // BB2 of the corresponding handler to be an "EH successor" of BB1. Because we // make the conservative assumption that control flow can jump from a try block // to its handler at any time, the immediate (regular control flow) // predecessor(s) of the the first block of a try block are also considered to // have the first block of the handler as an EH successor. This makes variables that // are "live-in" to the handler become "live-out" for these try-predecessor block, // so that they become live-in to the try -- which we require. // // This class maintains the minimum amount of state necessary to implement // successor iteration. The basic block whose successors are enumerated and // the compiler need to be provided by Advance/Current's callers. In addition // to iterators, this allows the use of other approaches that are more space // efficient. class EHSuccessorIterPosition { // The number of "regular" (i.e., non-exceptional) successors that remain to // be considered. If BB1 has successor BB2, and BB2 is the first block of a // try block, then we consider the catch block of BB2's try to be an EH // successor of BB1. 
This captures the iteration over the successors of BB1 // for this purpose. (In reverse order; we're done when this field is 0). unsigned m_remainingRegSuccs; // The current "regular" successor of "m_block" that we're considering. BasicBlock* m_curRegSucc; // The current try block. If non-null, then the current successor "m_curRegSucc" // is the first block of the handler of this block. While this try block has // enclosing try's that also start with "m_curRegSucc", the corresponding handlers will be // further EH successors. EHblkDsc* m_curTry; // Requires that "m_curTry" is NULL. Determines whether there is, as // discussed just above, a regular successor that's the first block of a // try; if so, sets "m_curTry" to that try block. (As noted above, selecting // the try containing the current regular successor as the "current try" may cause // multiple first-blocks of catches to be yielded as EH successors: trys enclosing // the current try are also included if they also start with the current EH successor.) void FindNextRegSuccTry(Compiler* comp, BasicBlock* block); public: // Constructs a position that "points" to the first EH successor of `block`. EHSuccessorIterPosition(Compiler* comp, BasicBlock* block); // Constructs a position that "points" past the last EH successor of `block` ("end" position). EHSuccessorIterPosition() : m_remainingRegSuccs(0), m_curTry(nullptr) { } // Go on to the next EH successor. void Advance(Compiler* comp, BasicBlock* block); // Returns the current EH successor. // Requires that "*this" is not equal to the "end" position. BasicBlock* Current(Compiler* comp, BasicBlock* block); // Returns "true" iff "*this" is equal to "ehsi". bool operator==(const EHSuccessorIterPosition& ehsi) { return m_curTry == ehsi.m_curTry && m_remainingRegSuccs == ehsi.m_remainingRegSuccs; } bool operator!=(const EHSuccessorIterPosition& ehsi) { return !((*this) == ehsi); } }; // Yields both normal and EH successors (in that order) in one iteration. // // This class maintains the minimum amount of state necessary to implement // successor iteration. The basic block whose successors are enumerated and // the compiler need to be provided by Advance/Current's callers. In addition // to iterators, this allows the use of other approaches that are more space // efficient. class AllSuccessorIterPosition { // Normal successor position unsigned m_numNormSuccs; unsigned m_remainingNormSucc; // EH successor position EHSuccessorIterPosition m_ehIter; // True iff m_blk is a BBJ_CALLFINALLY block, and the current try block of m_ehIter, // the first block of whose handler would be next yielded, is the jump target of m_blk. inline bool CurTryIsBlkCallFinallyTarget(Compiler* comp, BasicBlock* block); public: // Constructs a position that "points" to the first successor of `block`. inline AllSuccessorIterPosition(Compiler* comp, BasicBlock* block); // Constructs a position that "points" past the last successor of `block` ("end" position). AllSuccessorIterPosition() : m_remainingNormSucc(0), m_ehIter() { } // Go on to the next successor. inline void Advance(Compiler* comp, BasicBlock* block); // Returns the current successor. // Requires that "*this" is not equal to the "end" position. inline BasicBlock* Current(Compiler* comp, BasicBlock* block); bool IsCurrentEH() { return m_remainingNormSucc == 0; } bool HasCurrent() { return *this != AllSuccessorIterPosition(); } // Returns "true" iff "*this" is equal to "asi". 
bool operator==(const AllSuccessorIterPosition& asi) { return (m_remainingNormSucc == asi.m_remainingNormSucc) && (m_ehIter == asi.m_ehIter); } bool operator!=(const AllSuccessorIterPosition& asi) { return !((*this) == asi); } }; // PredEdgeList: adapter class for forward iteration of the predecessor edge linked list using range-based `for`, // normally used via BasicBlock::PredEdges(), e.g.: // for (flowList* const edge : block->PredEdges()) ... // class PredEdgeList { flowList* m_begin; // Forward iterator for the predecessor edges linked list. // The caller can't make changes to the preds list when using this. // class iterator { flowList* m_pred; #ifdef DEBUG // Try to guard against the user of the iterator from making changes to the IR that would invalidate // the iterator: cache the edge we think should be next, then check it when we actually do the `++` // operation. This is a bit conservative, but attempts to protect against callers assuming too much about // this iterator implementation. flowList* m_next; #endif public: iterator(flowList* pred); flowList* operator*() const { return m_pred; } iterator& operator++(); bool operator!=(const iterator& i) const { return m_pred != i.m_pred; } }; public: PredEdgeList(flowList* pred) : m_begin(pred) { } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(nullptr); } }; // PredBlockList: adapter class for forward iteration of the predecessor edge linked list yielding // predecessor blocks, using range-based `for`, normally used via BasicBlock::PredBlocks(), e.g.: // for (BasicBlock* const predBlock : block->PredBlocks()) ... // class PredBlockList { flowList* m_begin; // Forward iterator for the predecessor edges linked list, yielding the predecessor block, not the edge. // The caller can't make changes to the preds list when using this. // class iterator { flowList* m_pred; #ifdef DEBUG // Try to guard against the user of the iterator from making changes to the IR that would invalidate // the iterator: cache the edge we think should be next, then check it when we actually do the `++` // operation. This is a bit conservative, but attempts to protect against callers assuming too much about // this iterator implementation. flowList* m_next; #endif public: iterator(flowList* pred); BasicBlock* operator*() const; iterator& operator++(); bool operator!=(const iterator& i) const { return m_pred != i.m_pred; } }; public: PredBlockList(flowList* pred) : m_begin(pred) { } iterator begin() const { return iterator(m_begin); } iterator end() const { return iterator(nullptr); } }; // BBArrayIterator: forward iterator for an array of BasicBlock*, such as the BBswtDesc->bbsDstTab. // It is an error (with assert) to yield a nullptr BasicBlock* in this array. // `m_bbEntry` can be nullptr, but it only makes sense if both the begin and end of an iteration range are nullptr // (meaning, no actual iteration will happen). 
// class BBArrayIterator { BasicBlock* const* m_bbEntry; public: BBArrayIterator(BasicBlock* const* bbEntry) : m_bbEntry(bbEntry) { } BasicBlock* operator*() const { assert(m_bbEntry != nullptr); BasicBlock* bTarget = *m_bbEntry; assert(bTarget != nullptr); return bTarget; } BBArrayIterator& operator++() { assert(m_bbEntry != nullptr); ++m_bbEntry; return *this; } bool operator!=(const BBArrayIterator& i) const { return m_bbEntry != i.m_bbEntry; } }; // BBSwitchTargetList: adapter class for forward iteration of switch targets, using range-based `for`, // normally used via BasicBlock::SwitchTargets(), e.g.: // for (BasicBlock* const target : block->SwitchTargets()) ... // class BBSwitchTargetList { BBswtDesc* m_bbsDesc; public: BBSwitchTargetList(BBswtDesc* bbsDesc); BBArrayIterator begin() const; BBArrayIterator end() const; }; //------------------------------------------------------------------------ // BasicBlockFlags: a bitmask of flags for BasicBlock // // clang-format off enum BasicBlockFlags : unsigned __int64 { #define MAKE_BBFLAG(bit) (1ULL << (bit)) BBF_EMPTY = 0, BBF_VISITED = MAKE_BBFLAG( 0), // BB visited during optimizations BBF_MARKED = MAKE_BBFLAG( 1), // BB marked during optimizations BBF_CHANGED = MAKE_BBFLAG( 2), // input/output of this block has changed BBF_REMOVED = MAKE_BBFLAG( 3), // BB has been removed from bb-list BBF_DONT_REMOVE = MAKE_BBFLAG( 4), // BB should not be removed during flow graph optimizations BBF_IMPORTED = MAKE_BBFLAG( 5), // BB byte-code has been imported BBF_INTERNAL = MAKE_BBFLAG( 6), // BB has been added by the compiler BBF_FAILED_VERIFICATION = MAKE_BBFLAG( 7), // BB has verification exception BBF_TRY_BEG = MAKE_BBFLAG( 8), // BB starts a 'try' block BBF_FUNCLET_BEG = MAKE_BBFLAG( 9), // BB is the beginning of a funclet BBF_HAS_NULLCHECK = MAKE_BBFLAG(10), // BB contains a null check BBF_HAS_SUPPRESSGC_CALL = MAKE_BBFLAG(11), // BB contains a call to a method with SuppressGCTransitionAttribute BBF_RUN_RARELY = MAKE_BBFLAG(12), // BB is rarely run (catch clauses, blocks with throws etc) BBF_LOOP_HEAD = MAKE_BBFLAG(13), // BB is the head of a loop BBF_LOOP_CALL0 = MAKE_BBFLAG(14), // BB starts a loop that sometimes won't call BBF_LOOP_CALL1 = MAKE_BBFLAG(15), // BB starts a loop that will always call BBF_HAS_LABEL = MAKE_BBFLAG(16), // BB needs a label BBF_LOOP_ALIGN = MAKE_BBFLAG(17), // Block is lexically the first block in a loop we intend to align. BBF_HAS_JMP = MAKE_BBFLAG(18), // BB executes a JMP instruction (instead of return) BBF_GC_SAFE_POINT = MAKE_BBFLAG(19), // BB has a GC safe point (a call). More abstractly, BB does not require a // (further) poll -- this may be because this BB has a call, or, in some // cases, because the BB occurs in a loop, and we've determined that all // paths in the loop body leading to BB include a call. BBF_HAS_IDX_LEN = MAKE_BBFLAG(20), // BB contains simple index or length expressions on an array local var. BBF_HAS_NEWARRAY = MAKE_BBFLAG(21), // BB contains 'new' of an array BBF_HAS_NEWOBJ = MAKE_BBFLAG(22), // BB contains 'new' of an object type. #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BBF_FINALLY_TARGET = MAKE_BBFLAG(23), // BB is the target of a finally return: where a finally will return during // non-exceptional flow. 
Because the ARM calling sequence for calling a // finally explicitly sets the return address to the finally target and jumps // to the finally, instead of using a call instruction, ARM needs this to // generate correct code at the finally target, to allow for proper stack // unwind from within a non-exceptional call to a finally. #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) BBF_BACKWARD_JUMP = MAKE_BBFLAG(24), // BB is surrounded by a backward jump/switch arc BBF_RETLESS_CALL = MAKE_BBFLAG(25), // BBJ_CALLFINALLY that will never return (and therefore, won't need a paired // BBJ_ALWAYS); see isBBCallAlwaysPair(). BBF_LOOP_PREHEADER = MAKE_BBFLAG(26), // BB is a loop preheader block BBF_COLD = MAKE_BBFLAG(27), // BB is cold BBF_PROF_WEIGHT = MAKE_BBFLAG(28), // BB weight is computed from profile data BBF_IS_LIR = MAKE_BBFLAG(29), // Set if the basic block contains LIR (as opposed to HIR) BBF_KEEP_BBJ_ALWAYS = MAKE_BBFLAG(30), // A special BBJ_ALWAYS block, used by EH code generation. Keep the jump kind // as BBJ_ALWAYS. Used for the paired BBJ_ALWAYS block following the // BBJ_CALLFINALLY block, as well as, on x86, the final step block out of a // finally. BBF_CLONED_FINALLY_BEGIN = MAKE_BBFLAG(31), // First block of a cloned finally region BBF_CLONED_FINALLY_END = MAKE_BBFLAG(32), // Last block of a cloned finally region BBF_HAS_CALL = MAKE_BBFLAG(33), // BB contains a call BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY = MAKE_BBFLAG(34), // Block is dominated by exceptional entry. BBF_BACKWARD_JUMP_TARGET = MAKE_BBFLAG(35), // Block is a target of a backward jump BBF_PATCHPOINT = MAKE_BBFLAG(36), // Block is a patchpoint BBF_HAS_CLASS_PROFILE = MAKE_BBFLAG(37), // BB contains a call needing a class profile BBF_PARTIAL_COMPILATION_PATCHPOINT = MAKE_BBFLAG(38), // Block is a partial compilation patchpoint BBF_HAS_ALIGN = MAKE_BBFLAG(39), // BB ends with 'align' instruction BBF_TAILCALL_SUCCESSOR = MAKE_BBFLAG(40), // BB has pred that has potential tail call BBF_BACKWARD_JUMP_SOURCE = MAKE_BBFLAG(41), // Block is a source of a backward jump // The following are sets of flags. // Flags that relate blocks to loop structure. BBF_LOOP_FLAGS = BBF_LOOP_PREHEADER | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_LOOP_ALIGN, // Flags to update when two blocks are compacted BBF_COMPACT_UPD = BBF_CHANGED | BBF_GC_SAFE_POINT | BBF_HAS_JMP | BBF_HAS_IDX_LEN | BBF_BACKWARD_JUMP | BBF_HAS_NEWARRAY | \ BBF_HAS_NEWOBJ | BBF_HAS_NULLCHECK, // Flags a block should not have had before it is split. BBF_SPLIT_NONEXIST = BBF_CHANGED | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_RETLESS_CALL | BBF_LOOP_PREHEADER | BBF_COLD, // Flags lost by the top block when a block is split. // Note, this is a conservative guess. // For example, the top block might or might not have BBF_GC_SAFE_POINT, // but we assume it does not have BBF_GC_SAFE_POINT any more. BBF_SPLIT_LOST = BBF_GC_SAFE_POINT | BBF_HAS_JMP | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END, // Flags gained by the bottom block when a block is split. // Note, this is a conservative guess. // For example, the bottom block might or might not have BBF_HAS_NEWARRAY or BBF_HAS_NULLCHECK, // but we assume it has BBF_HAS_NEWARRAY and BBF_HAS_NULLCHECK. // TODO: Should BBF_RUN_RARELY be added to BBF_SPLIT_GAINED ? 
BBF_SPLIT_GAINED = BBF_DONT_REMOVE | BBF_HAS_JMP | BBF_BACKWARD_JUMP | BBF_HAS_IDX_LEN | BBF_HAS_NEWARRAY | BBF_PROF_WEIGHT | \ BBF_HAS_NEWOBJ | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END | BBF_HAS_NULLCHECK | BBF_HAS_CLASS_PROFILE, }; inline constexpr BasicBlockFlags operator ~(BasicBlockFlags a) { return (BasicBlockFlags)(~(unsigned __int64)a); } inline constexpr BasicBlockFlags operator |(BasicBlockFlags a, BasicBlockFlags b) { return (BasicBlockFlags)((unsigned __int64)a | (unsigned __int64)b); } inline constexpr BasicBlockFlags operator &(BasicBlockFlags a, BasicBlockFlags b) { return (BasicBlockFlags)((unsigned __int64)a & (unsigned __int64)b); } inline BasicBlockFlags& operator |=(BasicBlockFlags& a, BasicBlockFlags b) { return a = (BasicBlockFlags)((unsigned __int64)a | (unsigned __int64)b); } inline BasicBlockFlags& operator &=(BasicBlockFlags& a, BasicBlockFlags b) { return a = (BasicBlockFlags)((unsigned __int64)a & (unsigned __int64)b); } // clang-format on //------------------------------------------------------------------------ // BasicBlock: describes a basic block in the flowgraph. // // Note that this type derives from LIR::Range in order to make the LIR // utilities that are polymorphic over basic block and scratch ranges // faster and simpler. // struct BasicBlock : private LIR::Range { friend class LIR; BasicBlock* bbNext; // next BB in ascending PC offset order BasicBlock* bbPrev; void setNext(BasicBlock* next) { bbNext = next; if (next) { next->bbPrev = this; } } BasicBlockFlags bbFlags; static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_LOST) == 0); static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_GAINED) == 0); unsigned bbNum; // the block's number unsigned bbRefs; // number of blocks that can reach here, either by fall-through or a branch. If this falls to zero, // the block is unreachable. bool isRunRarely() const { return ((bbFlags & BBF_RUN_RARELY) != 0); } bool isLoopHead() const { return ((bbFlags & BBF_LOOP_HEAD) != 0); } bool isLoopAlign() const { return ((bbFlags & BBF_LOOP_ALIGN) != 0); } void unmarkLoopAlign(Compiler* comp DEBUG_ARG(const char* reason)); bool hasAlign() const { return ((bbFlags & BBF_HAS_ALIGN) != 0); } #ifdef DEBUG void dspFlags(); // Print the flags unsigned dspCheapPreds(); // Print the predecessors (bbCheapPreds) unsigned dspPreds(); // Print the predecessors (bbPreds) void dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH // regions are printed: see NumSucc() for details. void dspJumpKind(); // Print the block jump kind (e.g., BBJ_NONE, BBJ_COND, etc.). // Print a simple basic block header for various output, including a list of predecessors and successors. void dspBlockHeader(Compiler* compiler, bool showKind = true, bool showFlags = false, bool showPreds = true); const char* dspToString(int blockNumPadding = 0); #endif // DEBUG #define BB_UNITY_WEIGHT 100.0 // how much a normal execute once block weighs #define BB_UNITY_WEIGHT_UNSIGNED 100 // how much a normal execute once block weighs #define BB_LOOP_WEIGHT_SCALE 8.0 // synthetic profile scale factor for loops #define BB_ZERO_WEIGHT 0.0 #define BB_MAX_WEIGHT FLT_MAX // maximum finite weight -- needs rethinking. 
    weight_t bbWeight; // The dynamic execution weight of this block

    // getCalledCount -- get the value used to normalize weights for this method
    static weight_t getCalledCount(Compiler* comp);

    // getBBWeight -- get the normalized weight of this block
    weight_t getBBWeight(Compiler* comp);

    // hasProfileWeight -- Returns true if this block's weight came from profile data
    bool hasProfileWeight() const
    {
        return ((this->bbFlags & BBF_PROF_WEIGHT) != 0);
    }

    // setBBProfileWeight -- Set the profile-derived weight for a basic block
    // and update the run rarely flag as appropriate.
    void setBBProfileWeight(weight_t weight)
    {
        this->bbFlags |= BBF_PROF_WEIGHT;
        this->bbWeight = weight;

        if (weight == BB_ZERO_WEIGHT)
        {
            this->bbFlags |= BBF_RUN_RARELY;
        }
        else
        {
            this->bbFlags &= ~BBF_RUN_RARELY;
        }
    }

    // this block will inherit the same weight and relevant bbFlags as bSrc
    //
    void inheritWeight(BasicBlock* bSrc)
    {
        inheritWeightPercentage(bSrc, 100);
    }

    // Similar to inheritWeight(), but we're splitting a block (such as creating blocks for qmark removal).
    // So, specify a percentage (0 to 100) of the weight the block should inherit.
    //
    // Can be invoked as a self-rescale, e.g.: block->inheritWeightPercentage(block, 50)
    //
    void inheritWeightPercentage(BasicBlock* bSrc, unsigned percentage)
    {
        assert(0 <= percentage && percentage <= 100);

        this->bbWeight = (bSrc->bbWeight * percentage) / 100;

        if (bSrc->hasProfileWeight())
        {
            this->bbFlags |= BBF_PROF_WEIGHT;
        }
        else
        {
            this->bbFlags &= ~BBF_PROF_WEIGHT;
        }

        if (this->bbWeight == BB_ZERO_WEIGHT)
        {
            this->bbFlags |= BBF_RUN_RARELY;
        }
        else
        {
            this->bbFlags &= ~BBF_RUN_RARELY;
        }
    }

    // Scale a block's weight by some factor.
    //
    void scaleBBWeight(weight_t scale)
    {
        this->bbWeight = this->bbWeight * scale;

        if (this->bbWeight == BB_ZERO_WEIGHT)
        {
            this->bbFlags |= BBF_RUN_RARELY;
        }
        else
        {
            this->bbFlags &= ~BBF_RUN_RARELY;
        }
    }

    // Set block weight to zero, and set run rarely flag.
    //
    void bbSetRunRarely()
    {
        this->scaleBBWeight(BB_ZERO_WEIGHT);
    }

    // makeBlockHot()
    //     This is used to override any profiling data
    //     and force a block to be in the hot region.
    //     We only call this method for handler entry points
    //     and only when HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION is 1.
    //     Doing this helps fgReorderBlocks() by telling
    //     it to try to move these blocks into the hot region.
    //     Note that we do this strictly as an optimization,
    //     not for correctness. fgDetermineFirstColdBlock()
    //     will find all handler entry points and ensure that
    //     for now we don't place them in the cold section.
    //
    void makeBlockHot()
    {
        if (this->bbWeight == BB_ZERO_WEIGHT)
        {
            this->bbFlags &= ~BBF_RUN_RARELY;  // Clear any RarelyRun flag
            this->bbFlags &= ~BBF_PROF_WEIGHT; // Clear any profile-derived flag
            this->bbWeight = 1;
        }
    }

    bool isMaxBBWeight() const
    {
        return (bbWeight >= BB_MAX_WEIGHT);
    }

    // Returns "true" if the block is empty. Empty here means there are no statement
    // trees *except* PHI definitions.
    bool isEmpty() const;

    bool isValid() const;

    // Returns "true" iff "this" is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair --
    // a block corresponding to an exit from the try of a try/finally.
    bool isBBCallAlwaysPair() const;

    // Returns "true" iff "this" is the last block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair --
    // a block corresponding to an exit from the try of a try/finally.
bool isBBCallAlwaysPairTail() const; BBjumpKinds bbJumpKind; // jump (if any) at the end of this block /* The following union describes the jump target(s) of this block */ union { unsigned bbJumpOffs; // PC offset (temporary only) BasicBlock* bbJumpDest; // basic block BBswtDesc* bbJumpSwt; // switch descriptor }; bool KindIs(BBjumpKinds kind) const { return bbJumpKind == kind; } template <typename... T> bool KindIs(BBjumpKinds kind, T... rest) const { return KindIs(kind) || KindIs(rest...); } // NumSucc() gives the number of successors, and GetSucc() returns a given numbered successor. // // There are two versions of these functions: ones that take a Compiler* and ones that don't. You must // always use a matching set. Thus, if you call NumSucc() without a Compiler*, you must also call // GetSucc() without a Compiler*. // // The behavior of NumSucc()/GetSucc() is different when passed a Compiler* for blocks that end in: // (1) BBJ_EHFINALLYRET (a return from a finally or fault block) // (2) BBJ_EHFILTERRET (a return from EH filter block) // (3) BBJ_SWITCH // // For BBJ_EHFINALLYRET, if no Compiler* is passed, then the block is considered to have no // successor. If Compiler* is passed, we figure out the actual successors. Some cases will want one behavior, // other cases the other. For example, IL verification requires that these blocks end in an empty operand // stack, and since the dataflow analysis of IL verification is concerned only with the contents of the // operand stack, we can consider the finally block to have no successors. But a more general dataflow // analysis that is tracking the contents of local variables might want to consider *all* successors, // and would pass the current Compiler object. // // Similarly, BBJ_EHFILTERRET blocks are assumed to have no successors if Compiler* is not passed; if // Compiler* is passed, NumSucc/GetSucc yields the first block of the try block's handler. // // For BBJ_SWITCH, if Compiler* is not passed, then all switch successors are returned. If Compiler* // is passed, then only unique switch successors are returned; the duplicate successors are omitted. // // Note that for BBJ_COND, which has two successors (fall through and condition true branch target), // only the unique targets are returned. Thus, if both targets are the same, NumSucc() will only return 1 // instead of 2. // // NumSucc: Returns the number of successors of "this". unsigned NumSucc() const; unsigned NumSucc(Compiler* comp); // GetSucc: Returns the "i"th successor. Requires (0 <= i < NumSucc()). BasicBlock* GetSucc(unsigned i) const; BasicBlock* GetSucc(unsigned i, Compiler* comp); // SwitchTargets: convenience methods for enabling range-based `for` iteration over a switch block's targets, e.g.: // for (BasicBlock* const bTarget : block->SwitchTargets()) ... // BBSwitchTargetList SwitchTargets() const { assert(bbJumpKind == BBJ_SWITCH); return BBSwitchTargetList(bbJumpSwt); } BasicBlock* GetUniquePred(Compiler* comp) const; BasicBlock* GetUniqueSucc() const; unsigned countOfInEdges() const { return bbRefs; } Statement* bbStmtList; GenTree* GetFirstLIRNode() const { return m_firstNode; } void SetFirstLIRNode(GenTree* tree) { m_firstNode = tree; } union { EntryState* bbEntryState; // verifier tracked state of all entries in stack. 
flowList* bbLastPred; // last pred list entry }; #define NO_BASE_TMP UINT_MAX // base# to use when we have none union { unsigned bbStkTempsIn; // base# for input stack temps int bbCountSchemaIndex; // schema index for count instrumentation }; union { unsigned bbStkTempsOut; // base# for output stack temps int bbClassSchemaIndex; // schema index for class instrumentation }; #define MAX_XCPTN_INDEX (USHRT_MAX - 1) // It would be nice to make bbTryIndex and bbHndIndex private, but there is still code that uses them directly, // especially Compiler::fgNewBBinRegion() and friends. // index, into the compHndBBtab table, of innermost 'try' clause containing the BB (used for raising exceptions). // Stored as index + 1; 0 means "no try index". unsigned short bbTryIndex; // index, into the compHndBBtab table, of innermost handler (filter, catch, fault/finally) containing the BB. // Stored as index + 1; 0 means "no handler index". unsigned short bbHndIndex; // Given two EH indices that are either bbTryIndex or bbHndIndex (or related), determine if index1 might be more // deeply nested than index2. Both index1 and index2 are in the range [0..compHndBBtabCount], where 0 means // "main function" and otherwise the value is an index into compHndBBtab[]. Note that "sibling" EH regions will // have a numeric index relationship that doesn't indicate nesting, whereas a more deeply nested region must have // a lower index than the region it is nested within. Note that if you compare a single block's bbTryIndex and // bbHndIndex, there is guaranteed to be a nesting relationship, since that block can't be simultaneously in two // sibling EH regions. In that case, "maybe" is actually "definitely". static bool ehIndexMaybeMoreNested(unsigned index1, unsigned index2) { if (index1 == 0) { // index1 is in the main method. It can't be more deeply nested than index2. return false; } else if (index2 == 0) { // index1 represents an EH region, whereas index2 is the main method. Thus, index1 is more deeply nested. assert(index1 > 0); return true; } else { // If index1 has a smaller index, it might be more deeply nested than index2. assert(index1 > 0); assert(index2 > 0); return index1 < index2; } } // catch type: class token of handler, or one of BBCT_*. Only set on first block of catch handler. 
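// Usage sketch (hypothetical example; `blk1` and `blk2` are assumed BasicBlock* locals):
// comparing the nesting of two blocks' try regions with the helper above. Note that the
// raw bbTryIndex values (stored as index + 1, with 0 meaning "main function") are what
// the helper expects, not getTryIndex().
//
//   if (BasicBlock::ehIndexMaybeMoreNested(blk1->bbTryIndex, blk2->bbTryIndex))
//   {
//       // blk1's try region might be more deeply nested than blk2's; the answer is
//       // only "maybe", since sibling regions can also satisfy this check.
//   }
//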
unsigned bbCatchTyp; bool hasTryIndex() const { return bbTryIndex != 0; } bool hasHndIndex() const { return bbHndIndex != 0; } unsigned getTryIndex() const { assert(bbTryIndex != 0); return bbTryIndex - 1; } unsigned getHndIndex() const { assert(bbHndIndex != 0); return bbHndIndex - 1; } void setTryIndex(unsigned val) { bbTryIndex = (unsigned short)(val + 1); assert(bbTryIndex != 0); } void setHndIndex(unsigned val) { bbHndIndex = (unsigned short)(val + 1); assert(bbHndIndex != 0); } void clearTryIndex() { bbTryIndex = 0; } void clearHndIndex() { bbHndIndex = 0; } void copyEHRegion(const BasicBlock* from) { bbTryIndex = from->bbTryIndex; bbHndIndex = from->bbHndIndex; } void copyTryIndex(const BasicBlock* from) { bbTryIndex = from->bbTryIndex; } void copyHndIndex(const BasicBlock* from) { bbHndIndex = from->bbHndIndex; } static bool sameTryRegion(const BasicBlock* blk1, const BasicBlock* blk2) { return blk1->bbTryIndex == blk2->bbTryIndex; } static bool sameHndRegion(const BasicBlock* blk1, const BasicBlock* blk2) { return blk1->bbHndIndex == blk2->bbHndIndex; } static bool sameEHRegion(const BasicBlock* blk1, const BasicBlock* blk2) { return sameTryRegion(blk1, blk2) && sameHndRegion(blk1, blk2); } bool hasEHBoundaryIn() const; bool hasEHBoundaryOut() const; // Some non-zero value that will not collide with real tokens for bbCatchTyp #define BBCT_NONE 0x00000000 #define BBCT_FAULT 0xFFFFFFFC #define BBCT_FINALLY 0xFFFFFFFD #define BBCT_FILTER 0xFFFFFFFE #define BBCT_FILTER_HANDLER 0xFFFFFFFF #define handlerGetsXcptnObj(hndTyp) ((hndTyp) != BBCT_NONE && (hndTyp) != BBCT_FAULT && (hndTyp) != BBCT_FINALLY) // The following fields are used for loop detection typedef unsigned char loopNumber; static const unsigned NOT_IN_LOOP = UCHAR_MAX; static const unsigned MAX_LOOP_NUM = 64; loopNumber bbNatLoopNum; // Index, in optLoopTable, of most-nested loop that contains this block, // or else NOT_IN_LOOP if this block is not in a loop. // TODO-Cleanup: Get rid of bbStkDepth and use bbStackDepthOnEntry() instead union { unsigned short bbStkDepth; // stack depth on entry unsigned short bbFPinVars; // number of inner enregistered FP vars }; // Basic block predecessor lists. Early in compilation, some phases might need to compute "cheap" predecessor // lists. These are stored in bbCheapPreds, computed by fgComputeCheapPreds(). If bbCheapPreds is valid, // 'fgCheapPredsValid' will be 'true'. Later, the "full" predecessor lists are created by fgComputePreds(), stored // in 'bbPreds', and then maintained throughout compilation. 'fgComputePredsDone' will be 'true' after the // full predecessor lists are created. See the comment at fgComputeCheapPreds() to see how those differ from // the "full" variant. union { BasicBlockList* bbCheapPreds; // ptr to list of cheap predecessors (used before normal preds are computed) flowList* bbPreds; // ptr to list of predecessors }; // PredEdges: convenience method for enabling range-based `for` iteration over predecessor edges, e.g.: // for (flowList* const edge : block->PredEdges()) ... // PredEdgeList PredEdges() const { return PredEdgeList(bbPreds); } // PredBlocks: convenience method for enabling range-based `for` iteration over predecessor blocks, e.g.: // for (BasicBlock* const predBlock : block->PredBlocks()) ... 
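// Usage sketch (hypothetical example; `block` is an assumed BasicBlock* local): walking
// the normal (non-cheap) predecessor edges declared above. This is only valid once
// bbPreds has been computed, i.e. when fgComputePredsDone is true.
//
//   for (flowList* const predEdge : block->PredEdges())
//   {
//       BasicBlock* const predBlock = predEdge->getBlock();
//       // predEdge->flDupCount > 1 means predBlock reaches this block through
//       // several switch cases that share this single edge entry.
//   }
//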
// PredBlockList PredBlocks() const { return PredBlockList(bbPreds); } // Pred list maintenance // bool checkPredListOrder(); void ensurePredListOrder(Compiler* compiler); void reorderPredList(Compiler* compiler); BlockSet bbReach; // Set of all blocks that can reach this one union { BasicBlock* bbIDom; // Represent the closest dominator to this block (called the Immediate // Dominator) used to compute the dominance tree. void* bbSparseProbeList; // Used early on by fgInstrument void* bbSparseCountInfo; // Used early on by fgIncorporateEdgeCounts }; unsigned bbPostOrderNum; // the block's post order number in the graph. IL_OFFSET bbCodeOffs; // IL offset of the beginning of the block IL_OFFSET bbCodeOffsEnd; // IL offset past the end of the block. Thus, the [bbCodeOffs..bbCodeOffsEnd) // range is not inclusive of the end offset. The count of IL bytes in the block // is bbCodeOffsEnd - bbCodeOffs, assuming neither are BAD_IL_OFFSET. #ifdef DEBUG void dspBlockILRange() const; // Display the block's IL range as [XXX...YYY), where XXX and YYY might be "???" for // BAD_IL_OFFSET. #endif // DEBUG VARSET_TP bbVarUse; // variables used by block (before an assignment) VARSET_TP bbVarDef; // variables assigned by block (before a use) VARSET_TP bbLiveIn; // variables live on entry VARSET_TP bbLiveOut; // variables live on exit // Use, def, live in/out information for the implicit memory variable. MemoryKindSet bbMemoryUse : MemoryKindCount; // must be set for any MemoryKinds this block references MemoryKindSet bbMemoryDef : MemoryKindCount; // must be set for any MemoryKinds this block mutates MemoryKindSet bbMemoryLiveIn : MemoryKindCount; MemoryKindSet bbMemoryLiveOut : MemoryKindCount; MemoryKindSet bbMemoryHavoc : MemoryKindCount; // If true, at some point the block does an operation // that leaves memory in an unknown state. (E.g., // unanalyzed call, store through unknown pointer...) // We want to make phi functions for the special implicit var memory. But since this is not a real // lclVar, and thus has no local #, we can't use a GenTreePhiArg. Instead, we use this struct. struct MemoryPhiArg { unsigned m_ssaNum; // SSA# for incoming value. MemoryPhiArg* m_nextArg; // Next arg in the list, else NULL. unsigned GetSsaNum() { return m_ssaNum; } MemoryPhiArg(unsigned ssaNum, MemoryPhiArg* nextArg = nullptr) : m_ssaNum(ssaNum), m_nextArg(nextArg) { } void* operator new(size_t sz, class Compiler* comp); }; static MemoryPhiArg* EmptyMemoryPhiDef; // Special value (0x1, FWIW) to represent a to-be-filled in Phi arg list // for Heap. MemoryPhiArg* bbMemorySsaPhiFunc[MemoryKindCount]; // If the "in" Heap SSA var is not a phi definition, this value // is NULL. // Otherwise, it is either the special value EmptyMemoryPhiDefn, to indicate // that Heap needs a phi definition on entry, or else it is the linked list // of the phi arguments. unsigned bbMemorySsaNumIn[MemoryKindCount]; // The SSA # of memory on entry to the block. unsigned bbMemorySsaNumOut[MemoryKindCount]; // The SSA # of memory on exit from the block. VARSET_TP bbScope; // variables in scope over the block void InitVarSets(class Compiler* comp); /* The following are the standard bit sets for dataflow analysis. * We perform CSE and range-checks at the same time * and assertion propagation separately, * thus we can union them since the two operations are completely disjunct. 
*/ union { EXPSET_TP bbCseGen; // CSEs computed by block ASSERT_TP bbAssertionGen; // value assignments computed by block }; union { EXPSET_TP bbCseIn; // CSEs available on entry ASSERT_TP bbAssertionIn; // value assignments available on entry }; union { EXPSET_TP bbCseOut; // CSEs available on exit ASSERT_TP bbAssertionOut; // value assignments available on exit }; void* bbEmitCookie; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) void* bbUnwindNopEmitCookie; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef VERIFIER stackDesc bbStackIn; // stack descriptor for input stackDesc bbStackOut; // stack descriptor for output verTypeVal* bbTypesIn; // list of variable types on input verTypeVal* bbTypesOut; // list of variable types on output #endif // VERIFIER //------------------------------------------------------------------------- #if MEASURE_BLOCK_SIZE static size_t s_Size; static size_t s_Count; #endif // MEASURE_BLOCK_SIZE bool bbFallsThrough() const; // Our slop fraction is 1/128 of the block weight rounded off static weight_t GetSlopFraction(weight_t weightBlk) { return ((weightBlk + 64) / 128); } // Given an the edge b1 -> b2, calculate the slop fraction by // using the higher of the two block weights static weight_t GetSlopFraction(BasicBlock* b1, BasicBlock* b2) { return GetSlopFraction(max(b1->bbWeight, b2->bbWeight)); } #ifdef DEBUG unsigned bbTgtStkDepth; // Native stack depth on entry (for throw-blocks) static unsigned s_nMaxTrees; // The max # of tree nodes in any BB // This is used in integrity checks. We semi-randomly pick a traversal stamp, label all blocks // in the BB list with that stamp (in this field); then we can tell if (e.g.) predecessors are // still in the BB list by whether they have the same stamp (with high probability). unsigned bbTraversalStamp; // bbID is a unique block identifier number that does not change: it does not get renumbered, like bbNum. unsigned bbID; #endif // DEBUG ThisInitState bbThisOnEntry() const; unsigned bbStackDepthOnEntry() const; void bbSetStack(void* stackBuffer); StackEntry* bbStackOnEntry() const; // "bbNum" is one-based (for unknown reasons); it is sometimes useful to have the corresponding // zero-based number for use as an array index. unsigned bbInd() const { assert(bbNum > 0); return bbNum - 1; } Statement* firstStmt() const; Statement* lastStmt() const; // Statements: convenience method for enabling range-based `for` iteration over the statement list, e.g.: // for (Statement* const stmt : block->Statements()) // StatementList Statements() const { return StatementList(firstStmt()); } // NonPhiStatements: convenience method for enabling range-based `for` iteration over the statement list, // excluding any initial PHI statements, e.g.: // for (Statement* const stmt : block->NonPhiStatements()) // StatementList NonPhiStatements() const { return StatementList(FirstNonPhiDef()); } GenTree* lastNode() const; bool endsWithJmpMethod(Compiler* comp) const; bool endsWithTailCall(Compiler* comp, bool fastTailCallsOnly, bool tailCallsConvertibleToLoopOnly, GenTree** tailCall) const; bool endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly = false) const; bool endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall) const; // Returns the first statement in the statement list of "this" that is // not an SSA definition (a lcl = phi(...) assignment). 
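// Usage sketch (hypothetical example; `block` is an assumed BasicBlock* local): skipping
// the leading SSA phi definitions while walking a block's statements, using the
// convenience iterators declared above.
//
//   for (Statement* const stmt : block->NonPhiStatements())
//   {
//       // visit each non-phi statement in order
//   }
//
// For the slop helper above, GetSlopFraction(100.0) evaluates to (100 + 64) / 128, i.e.
// 1.28125 -- the "1/128 of the block weight, rounded off" described in its comment.
//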
Statement* FirstNonPhiDef() const; Statement* FirstNonPhiDefOrCatchArgAsg() const; BasicBlock() : bbStmtList(nullptr), bbLiveIn(VarSetOps::UninitVal()), bbLiveOut(VarSetOps::UninitVal()) { } // Iteratable collection of successors of a block. template <typename TPosition> class Successors { Compiler* m_comp; BasicBlock* m_block; public: Successors(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) { } class iterator { Compiler* m_comp; BasicBlock* m_block; TPosition m_pos; public: iterator(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block), m_pos(comp, block) { } iterator() : m_pos() { } void operator++(void) { m_pos.Advance(m_comp, m_block); } BasicBlock* operator*() { return m_pos.Current(m_comp, m_block); } bool operator==(const iterator& other) { return m_pos == other.m_pos; } bool operator!=(const iterator& other) { return m_pos != other.m_pos; } }; iterator begin() { return iterator(m_comp, m_block); } iterator end() { return iterator(); } }; Successors<EHSuccessorIterPosition> GetEHSuccs(Compiler* comp) { return Successors<EHSuccessorIterPosition>(comp, this); } Successors<AllSuccessorIterPosition> GetAllSuccs(Compiler* comp) { return Successors<AllSuccessorIterPosition>(comp, this); } // BBSuccList: adapter class for forward iteration of block successors, using range-based `for`, // normally used via BasicBlock::Succs(), e.g.: // for (BasicBlock* const target : block->Succs()) ... // class BBSuccList { // For one or two successors, pre-compute and stash the successors inline, in m_succs[], so we don't // need to call a function or execute another `switch` to get them. Also, pre-compute the begin and end // points of the iteration, for use by BBArrayIterator. `m_begin` and `m_end` will either point at // `m_succs` or at the switch table successor array. BasicBlock* m_succs[2]; BasicBlock* const* m_begin; BasicBlock* const* m_end; public: BBSuccList(const BasicBlock* block); BBArrayIterator begin() const; BBArrayIterator end() const; }; // BBCompilerSuccList: adapter class for forward iteration of block successors, using range-based `for`, // normally used via BasicBlock::Succs(), e.g.: // for (BasicBlock* const target : block->Succs(compiler)) ... // // This version uses NumSucc(Compiler*)/GetSucc(Compiler*). See the documentation there for the explanation // of the implications of this versus the version that does not take `Compiler*`. class BBCompilerSuccList { Compiler* m_comp; BasicBlock* m_block; // iterator: forward iterator for an array of BasicBlock*, such as the BBswtDesc->bbsDstTab. // class iterator { Compiler* m_comp; BasicBlock* m_block; unsigned m_succNum; public: iterator(Compiler* comp, BasicBlock* block, unsigned succNum) : m_comp(comp), m_block(block), m_succNum(succNum) { } BasicBlock* operator*() const { assert(m_block != nullptr); BasicBlock* bTarget = m_block->GetSucc(m_succNum, m_comp); assert(bTarget != nullptr); return bTarget; } iterator& operator++() { ++m_succNum; return *this; } bool operator!=(const iterator& i) const { return m_succNum != i.m_succNum; } }; public: BBCompilerSuccList(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block) { } iterator begin() const { return iterator(m_comp, m_block, 0); } iterator end() const { return iterator(m_comp, m_block, m_block->NumSucc(m_comp)); } }; // Succs: convenience methods for enabling range-based `for` iteration over a block's successors, e.g.: // for (BasicBlock* const succ : block->Succs()) ... 
// // There are two options: one that takes a Compiler* and one that doesn't. These correspond to the // NumSucc()/GetSucc() functions that do or do not take a Compiler*. See the comment for NumSucc()/GetSucc() // for the distinction. BBSuccList Succs() const { return BBSuccList(this); } BBCompilerSuccList Succs(Compiler* comp) { return BBCompilerSuccList(comp, this); } // Try to clone block state and statements from `from` block to `to` block (which must be new/empty), // optionally replacing uses of local `varNum` with IntCns `varVal`. Return true if all statements // in the block are cloned successfully, false (with partially-populated `to` block) if one fails. static bool CloneBlockState( Compiler* compiler, BasicBlock* to, const BasicBlock* from, unsigned varNum = (unsigned)-1, int varVal = 0); void MakeLIR(GenTree* firstNode, GenTree* lastNode); bool IsLIR() const; void SetDominatedByExceptionalEntryFlag() { bbFlags |= BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY; } bool IsDominatedByExceptionalEntryFlag() const { return (bbFlags & BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY) != 0; } #ifdef DEBUG bool Contains(const GenTree* node) const { assert(IsLIR()); for (Iterator iter = begin(); iter != end(); ++iter) { if (*iter == node) { return true; } } return false; } #endif // DEBUG }; template <> struct JitPtrKeyFuncs<BasicBlock> : public JitKeyFuncsDefEquals<const BasicBlock*> { public: // Make sure hashing is deterministic and not on "ptr." static unsigned GetHashCode(const BasicBlock* ptr); }; // A set of blocks. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, bool> BlkSet; // A vector of blocks. typedef jitstd::vector<BasicBlock*> BlkVector; // A map of block -> set of blocks, can be used as sparse block trees. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BlkSet*> BlkToBlkSetMap; // A map of block -> vector of blocks, can be used as sparse block trees. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BlkVector> BlkToBlkVectorMap; // Map from Block to Block. Used for a variety of purposes. typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BasicBlock*> BlockToBlockMap; // BasicBlockIterator: forward iterator for the BasicBlock linked list. // It is allowed to make changes to the BasicBlock list as long as the current block remains in the list. // E.g., the current block `m_bbNext` pointer can be altered (such as when inserting a following block), // as long as the current block is still in the list. // The block list is expected to be properly doubly-linked. // class BasicBlockIterator { BasicBlock* m_block; public: BasicBlockIterator(BasicBlock* block) : m_block(block) { } BasicBlock* operator*() const { return m_block; } BasicBlockIterator& operator++() { assert(m_block != nullptr); // Check that we haven't been spliced out of the list. assert((m_block->bbNext == nullptr) || (m_block->bbNext->bbPrev == m_block)); assert((m_block->bbPrev == nullptr) || (m_block->bbPrev->bbNext == m_block)); m_block = m_block->bbNext; return *this; } bool operator!=(const BasicBlockIterator& i) const { return m_block != i.m_block; } }; // BasicBlockSimpleList: adapter class for forward iteration of a lexically contiguous range of // BasicBlock, starting at `begin` and going to the end of the function, using range-based `for`, // normally used via Compiler::Blocks(), e.g.: // for (BasicBlock* const block : Blocks()) ... 
// class BasicBlockSimpleList { BasicBlock* m_begin; public: BasicBlockSimpleList(BasicBlock* begin) : m_begin(begin) { } BasicBlockIterator begin() const { return BasicBlockIterator(m_begin); } BasicBlockIterator end() const { return BasicBlockIterator(nullptr); } }; // BasicBlockRangeList: adapter class for forward iteration of a lexically contiguous range of // BasicBlock specified with both `begin` and `end` blocks. `begin` and `end` are *inclusive* // and must be non-null. E.g., // for (BasicBlock* const block : BasicBlockRangeList(startBlock, endBlock)) ... // // Note that endBlock->bbNext is captured at the beginning of the iteration. Thus, any blocks // inserted before that will continue the iteration. In particular, inserting blocks between endBlock // and endBlock->bbNext will yield unexpected results, as the iteration will continue longer than desired. // class BasicBlockRangeList { BasicBlock* m_begin; BasicBlock* m_end; public: BasicBlockRangeList(BasicBlock* begin, BasicBlock* end) : m_begin(begin), m_end(end) { assert(begin != nullptr); assert(end != nullptr); } BasicBlockIterator begin() const { return BasicBlockIterator(m_begin); } BasicBlockIterator end() const { return BasicBlockIterator(m_end->bbNext); // walk until we see the block *following* the `m_end` block } }; // BBswtDesc -- descriptor for a switch block // // Things to know: // 1. If bbsHasDefault is true, the default case is the last one in the array of basic block addresses // namely bbsDstTab[bbsCount - 1]. // 2. bbsCount must be at least 1, for the default case. bbsCount cannot be zero. It appears that the ECMA spec // allows for a degenerate switch with zero cases. Normally, the optimizer will optimize degenerate // switches with just a default case to a BBJ_ALWAYS branch, and a switch with just two cases to a BBJ_COND. // However, in debuggable code, we might not do that, so bbsCount might be 1. // struct BBswtDesc { BasicBlock** bbsDstTab; // case label table address unsigned bbsCount; // count of cases (includes 'default' if bbsHasDefault) // Case number and likelihood of most likely case // (only known with PGO, only valid if bbsHasDominantCase is true) unsigned bbsDominantCase; weight_t bbsDominantFraction; bool bbsHasDefault; // true if last switch case is a default case bool bbsHasDominantCase; // true if switch has a dominant case BBswtDesc() : bbsHasDefault(true), bbsHasDominantCase(false) { } BBswtDesc(Compiler* comp, const BBswtDesc* other); void removeDefault() { assert(bbsHasDefault); assert(bbsCount > 0); bbsHasDefault = false; bbsCount--; } BasicBlock* getDefault() { assert(bbsHasDefault); assert(bbsCount > 0); return bbsDstTab[bbsCount - 1]; } }; // BBSwitchTargetList out-of-class-declaration implementations (here due to C++ ordering requirements). // inline BBSwitchTargetList::BBSwitchTargetList(BBswtDesc* bbsDesc) : m_bbsDesc(bbsDesc) { assert(m_bbsDesc != nullptr); assert(m_bbsDesc->bbsDstTab != nullptr); } inline BBArrayIterator BBSwitchTargetList::begin() const { return BBArrayIterator(m_bbsDesc->bbsDstTab); } inline BBArrayIterator BBSwitchTargetList::end() const { return BBArrayIterator(m_bbsDesc->bbsDstTab + m_bbsDesc->bbsCount); } // BBSuccList out-of-class-declaration implementations // inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) { assert(block != nullptr); switch (block->bbJumpKind) { case BBJ_THROW: case BBJ_RETURN: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: // We don't need m_succs. 
m_begin = nullptr; m_end = nullptr; break; case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_LEAVE: m_succs[0] = block->bbJumpDest; m_begin = &m_succs[0]; m_end = &m_succs[1]; break; case BBJ_NONE: m_succs[0] = block->bbNext; m_begin = &m_succs[0]; m_end = &m_succs[1]; break; case BBJ_COND: m_succs[0] = block->bbNext; m_begin = &m_succs[0]; // If both fall-through and branch successors are identical, then only include // them once in the iteration (this is the same behavior as NumSucc()/GetSucc()). if (block->bbJumpDest == block->bbNext) { m_end = &m_succs[1]; } else { m_succs[1] = block->bbJumpDest; m_end = &m_succs[2]; } break; case BBJ_SWITCH: // We don't use the m_succs in-line data for switches; use the existing jump table in the block. assert(block->bbJumpSwt != nullptr); assert(block->bbJumpSwt->bbsDstTab != nullptr); m_begin = block->bbJumpSwt->bbsDstTab; m_end = block->bbJumpSwt->bbsDstTab + block->bbJumpSwt->bbsCount; break; default: unreached(); } assert(m_end >= m_begin); } inline BBArrayIterator BasicBlock::BBSuccList::begin() const { return BBArrayIterator(m_begin); } inline BBArrayIterator BasicBlock::BBSuccList::end() const { return BBArrayIterator(m_end); } // In compiler terminology the control flow between two BasicBlocks // is typically referred to as an "edge". Most well known are the // backward branches for loops, which are often called "back-edges". // // "struct flowList" is the type that represents our control flow edges. // This type is a linked list of zero or more "edges". // (The list of zero edges is represented by NULL.) // Every BasicBlock has a field called bbPreds of this type. This field // represents the list of "edges" that flow into this BasicBlock. // The flowList type only stores the BasicBlock* of the source for the // control flow edge. The destination block for the control flow edge // is implied to be the block which contained the bbPreds field. // // For a switch branch target there may be multiple "edges" that have // the same source block (and destination block). We need to count the // number of these edges so that during optimization we will know when // we have zero of them. Rather than have extra flowList entries we // increment the flDupCount field. // // When we have Profile weight for the BasicBlocks we can usually compute // the number of times each edge was executed by examining the adjacent // BasicBlock weights. As we are doing for BasicBlocks, we call the number // of times that a control flow edge was executed the "edge weight". // In order to compute the edge weights we need to use a bounded range // for every edge weight. These two fields, 'flEdgeWeightMin' and 'flEdgeWeightMax' // are used to hold a bounded range. Most often these will converge such // that both values are the same and that value is the exact edge weight. // Sometimes we are left with a rage of possible values between [Min..Max] // which represents an inexact edge weight. // // The bbPreds list is initially created by Compiler::fgComputePreds() // and is incrementally kept up to date. // // The edge weight are computed by Compiler::fgComputeEdgeWeights() // the edge weights are used to straighten conditional branches // by Compiler::fgReorderBlocks() // // We have a simpler struct, BasicBlockList, which is simply a singly-linked // list of blocks. This is used for various purposes, but one is as a "cheap" // predecessor list, computed by fgComputeCheapPreds(), and stored as a list // on BasicBlock pointed to by bbCheapPreds. 
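// Usage sketch (hypothetical example; `edge` is an assumed flowList* local): reading the
// bounded edge-weight range described above via the accessors declared below. When the
// two bounds have converged, the edge weight is known exactly.
//
//   weight_t const minW  = edge->edgeWeightMin();
//   weight_t const maxW  = edge->edgeWeightMax();
//   bool const     exact = (minW == maxW); // otherwise the weight is only known to lie in [minW..maxW]
//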
struct BasicBlockList { BasicBlockList* next; // The next BasicBlock in the list, nullptr for end of list. BasicBlock* block; // The BasicBlock of interest. BasicBlockList() : next(nullptr), block(nullptr) { } BasicBlockList(BasicBlock* blk, BasicBlockList* rest) : next(rest), block(blk) { } }; // flowList -- control flow edge // struct flowList { public: flowList* flNext; // The next BasicBlock in the list, nullptr for end of list. private: BasicBlock* m_block; // The BasicBlock of interest. weight_t flEdgeWeightMin; weight_t flEdgeWeightMax; public: unsigned flDupCount; // The count of duplicate "edges" (use only for switch stmts) public: BasicBlock* getBlock() const { return m_block; } void setBlock(BasicBlock* newBlock) { m_block = newBlock; } weight_t edgeWeightMin() const { return flEdgeWeightMin; } weight_t edgeWeightMax() const { return flEdgeWeightMax; } // These two methods are used to set new values for flEdgeWeightMin and flEdgeWeightMax // they are used only during the computation of the edge weights // They return false if the newWeight is not between the current [min..max] // when slop is non-zero we allow for the case where our weights might be off by 'slop' // bool setEdgeWeightMinChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop); bool setEdgeWeightMaxChecked(weight_t newWeight, BasicBlock* bDst, weight_t slop, bool* wbUsedSlop); void setEdgeWeights(weight_t newMinWeight, weight_t newMaxWeight, BasicBlock* bDst); flowList(BasicBlock* block, flowList* rest) : flNext(rest), m_block(block), flEdgeWeightMin(0), flEdgeWeightMax(0), flDupCount(0) { } }; // Pred list iterator implementations (that are required to be defined after the declaration of BasicBlock and flowList) inline PredEdgeList::iterator::iterator(flowList* pred) : m_pred(pred) { #ifdef DEBUG m_next = (m_pred == nullptr) ? nullptr : m_pred->flNext; #endif } inline PredEdgeList::iterator& PredEdgeList::iterator::operator++() { flowList* next = m_pred->flNext; #ifdef DEBUG // Check that the next block is the one we expect to see. assert(next == m_next); m_next = (next == nullptr) ? nullptr : next->flNext; #endif // DEBUG m_pred = next; return *this; } inline PredBlockList::iterator::iterator(flowList* pred) : m_pred(pred) { #ifdef DEBUG m_next = (m_pred == nullptr) ? nullptr : m_pred->flNext; #endif } inline BasicBlock* PredBlockList::iterator::operator*() const { return m_pred->getBlock(); } inline PredBlockList::iterator& PredBlockList::iterator::operator++() { flowList* next = m_pred->flNext; #ifdef DEBUG // Check that the next block is the one we expect to see. assert(next == m_next); m_next = (next == nullptr) ? nullptr : next->flNext; #endif // DEBUG m_pred = next; return *this; } // This enum represents a pre/post-visit action state to emulate a depth-first // spanning tree traversal of a tree or graph. enum DfsStackState { DSS_Invalid, // The initialized, invalid error state DSS_Pre, // The DFS pre-order (first visit) traversal state DSS_Post // The DFS post-order (last visit) traversal state }; // These structs represents an entry in a stack used to emulate a non-recursive // depth-first spanning tree traversal of a graph. The entry contains either a // block pointer or a block number depending on which is more useful. 
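// Usage sketch (hypothetical example; `stack` is an assumed LIFO container of
// DfsBlockEntry and `block` an assumed BasicBlock* root): the non-recursive traversal
// protocol these states support.
//
//   stack.push_back(DfsBlockEntry(DSS_Pre, block));  // schedule the first (pre-order) visit
//   // When a DSS_Pre entry is popped, push DfsBlockEntry(DSS_Post, block) and then a
//   // DSS_Pre entry for each unvisited successor; the DSS_Post entry is handled once
//   // all of the block's successors have been processed.
//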
struct DfsBlockEntry { DfsStackState dfsStackState; // The pre/post traversal action for this entry BasicBlock* dfsBlock; // The corresponding block for the action DfsBlockEntry(DfsStackState state, BasicBlock* basicBlock) : dfsStackState(state), dfsBlock(basicBlock) { } }; /***************************************************************************** * * The following call-backs supplied by the client; it's used by the code * emitter to convert a basic block to its corresponding emitter cookie. */ void* emitCodeGetCookie(BasicBlock* block); AllSuccessorIterPosition::AllSuccessorIterPosition(Compiler* comp, BasicBlock* block) : m_numNormSuccs(block->NumSucc(comp)), m_remainingNormSucc(m_numNormSuccs), m_ehIter(comp, block) { if (CurTryIsBlkCallFinallyTarget(comp, block)) { m_ehIter.Advance(comp, block); } } bool AllSuccessorIterPosition::CurTryIsBlkCallFinallyTarget(Compiler* comp, BasicBlock* block) { return (block->bbJumpKind == BBJ_CALLFINALLY) && (m_ehIter != EHSuccessorIterPosition()) && (block->bbJumpDest == m_ehIter.Current(comp, block)); } void AllSuccessorIterPosition::Advance(Compiler* comp, BasicBlock* block) { if (m_remainingNormSucc > 0) { m_remainingNormSucc--; } else { m_ehIter.Advance(comp, block); // If the original block whose successors we're iterating over // is a BBJ_CALLFINALLY, that finally clause's first block // will be yielded as a normal successor. Don't also yield as // an exceptional successor. if (CurTryIsBlkCallFinallyTarget(comp, block)) { m_ehIter.Advance(comp, block); } } } // Requires that "this" is not equal to the standard "end" iterator. Returns the // current successor. BasicBlock* AllSuccessorIterPosition::Current(Compiler* comp, BasicBlock* block) { if (m_remainingNormSucc > 0) { return block->GetSucc(m_numNormSuccs - m_remainingNormSucc, comp); } else { return m_ehIter.Current(comp, block); } } typedef BasicBlock::Successors<EHSuccessorIterPosition>::iterator EHSuccessorIter; typedef BasicBlock::Successors<AllSuccessorIterPosition>::iterator AllSuccessorIter; // An enumerator of a block's all successors. In some cases (e.g. SsaBuilder::TopologicalSort) // using iterators is not exactly efficient, at least because they contain an unnecessary // member - a pointer to the Compiler object. class AllSuccessorEnumerator { BasicBlock* m_block; AllSuccessorIterPosition m_pos; public: // Constructs an enumerator of all `block`'s successors. AllSuccessorEnumerator(Compiler* comp, BasicBlock* block) : m_block(block), m_pos(comp, block) { } // Gets the block whose successors are enumerated. BasicBlock* Block() { return m_block; } // Returns true if the next successor is an EH successor. bool IsNextEHSuccessor() { return m_pos.IsCurrentEH(); } // Returns the next available successor or `nullptr` if there are no more successors. BasicBlock* NextSuccessor(Compiler* comp) { if (!m_pos.HasCurrent()) { return nullptr; } BasicBlock* succ = m_pos.Current(comp, m_block); m_pos.Advance(comp, m_block); return succ; } }; // Simple dominator tree node that keeps track of a node's first child and next sibling. // The parent is provided by BasicBlock::bbIDom. struct DomTreeNode { BasicBlock* firstChild; BasicBlock* nextSibling; }; /*****************************************************************************/ #endif // _BLOCK_H_ /*****************************************************************************/
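// Usage sketch (hypothetical example; `comp` is an assumed Compiler* and `block` an
// assumed BasicBlock*): draining AllSuccessorEnumerator, which yields the normal
// successors first and then the EH successors, without the per-iterator Compiler*
// overhead mentioned above.
//
//   AllSuccessorEnumerator successors(comp, block);
//   while (BasicBlock* const succ = successors.NextSuccessor(comp))
//   {
//       // visit succ; successors.IsNextEHSuccessor() reports whether the *next*
//       // successor to be returned is an exceptional (EH) successor.
//   }
//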
1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/jit/fgbasic.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif // Flowgraph Construction and Maintenance void Compiler::fgInit() { impInit(); /* Initialization for fgWalkTreePre() and fgWalkTreePost() */ fgFirstBBScratch = nullptr; #ifdef DEBUG fgPrintInlinedMethods = false; #endif // DEBUG /* We haven't yet computed the bbPreds lists */ fgComputePredsDone = false; /* We haven't yet computed the bbCheapPreds lists */ fgCheapPredsValid = false; /* We haven't yet computed the edge weight */ fgEdgeWeightsComputed = false; fgHaveValidEdgeWeights = false; fgSlopUsedInEdgeWeights = false; fgRangeUsedInEdgeWeights = true; fgNeedsUpdateFlowGraph = false; fgCalledCount = BB_ZERO_WEIGHT; /* We haven't yet computed the dominator sets */ fgDomsComputed = false; fgReturnBlocksComputed = false; #ifdef DEBUG fgReachabilitySetsValid = false; #endif // DEBUG /* We don't know yet which loops will always execute calls */ fgLoopCallMarked = false; /* Initialize the basic block list */ fgFirstBB = nullptr; fgLastBB = nullptr; fgFirstColdBlock = nullptr; fgEntryBB = nullptr; fgOSREntryBB = nullptr; #if defined(FEATURE_EH_FUNCLETS) fgFirstFuncletBB = nullptr; fgFuncletsCreated = false; #endif // FEATURE_EH_FUNCLETS fgBBcount = 0; #ifdef DEBUG fgBBcountAtCodegen = 0; #endif // DEBUG fgBBNumMax = 0; fgEdgeCount = 0; fgDomBBcount = 0; fgBBVarSetsInited = false; fgReturnCount = 0; // Initialize BlockSet data. fgCurBBEpoch = 0; fgCurBBEpochSize = 0; fgBBSetCountInSizeTUnits = 0; genReturnBB = nullptr; genReturnLocal = BAD_VAR_NUM; /* We haven't reached the global morphing phase */ fgGlobalMorph = false; fgModified = false; #ifdef DEBUG fgSafeBasicBlockCreation = true; #endif // DEBUG fgLocalVarLivenessDone = false; /* Statement list is not threaded yet */ fgStmtListThreaded = false; // Initialize the logic for adding code. This is used to insert code such // as the code that raises an exception when an array range check fails. fgAddCodeList = nullptr; fgAddCodeModf = false; for (int i = 0; i < SCK_COUNT; i++) { fgExcptnTargetCache[i] = nullptr; } /* Keep track of the max count of pointer arguments */ fgPtrArgCntMax = 0; /* This global flag is set whenever we remove a statement */ fgStmtRemoved = false; /* This global flag is set whenever we add a throw block for a RngChk */ fgRngChkThrowAdded = false; /* reset flag for fgIsCodeAdded() */ /* Keep track of whether or not EH statements have been optimized */ fgOptimizedFinally = false; /* We will record a list of all BBJ_RETURN blocks here */ fgReturnBlocks = nullptr; /* This is set by fgComputeReachability */ fgEnterBlks = BlockSetOps::UninitVal(); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) fgAlwaysBlks = BlockSetOps::UninitVal(); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG fgEnterBlksSetValid = false; #endif // DEBUG #if !defined(FEATURE_EH_FUNCLETS) ehMaxHndNestingCount = 0; #endif // !FEATURE_EH_FUNCLETS /* Init the fgBigOffsetMorphingTemps to be BAD_VAR_NUM. 
*/ for (int i = 0; i < TYP_COUNT; i++) { fgBigOffsetMorphingTemps[i] = BAD_VAR_NUM; } fgNoStructPromotion = false; fgNoStructParamPromotion = false; optValnumCSE_phase = false; // referenced in fgMorphSmpOp() #ifdef DEBUG fgNormalizeEHDone = false; #endif // DEBUG #ifdef DEBUG if (!compIsForInlining()) { const int noStructPromotionValue = JitConfig.JitNoStructPromotion(); assert(0 <= noStructPromotionValue && noStructPromotionValue <= 2); if (noStructPromotionValue == 1) { fgNoStructPromotion = true; } if (noStructPromotionValue == 2) { fgNoStructParamPromotion = true; } } #endif // DEBUG if (!compIsForInlining()) { m_promotedStructDeathVars = nullptr; } #ifdef FEATURE_SIMD fgPreviousCandidateSIMDFieldAsgStmt = nullptr; #endif fgHasSwitch = false; fgPgoDisabled = false; fgPgoSchema = nullptr; fgPgoData = nullptr; fgPgoSchemaCount = 0; fgNumProfileRuns = 0; fgPgoBlockCounts = 0; fgPgoEdgeCounts = 0; fgPgoClassProfiles = 0; fgPgoInlineePgo = 0; fgPgoInlineeNoPgo = 0; fgPgoInlineeNoPgoSingleBlock = 0; fgCountInstrumentor = nullptr; fgClassInstrumentor = nullptr; fgPredListSortVector = nullptr; } /***************************************************************************** * * Create a basic block and append it to the current BB list. */ BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind) { // This method must not be called after the exception table has been // constructed, because it doesn't not provide support for patching // the exception table. noway_assert(compHndBBtabCount == 0); BasicBlock* block; /* Allocate the block descriptor */ block = bbNewBasicBlock(jumpKind); noway_assert(block->bbJumpKind == jumpKind); /* Append the block to the end of the global basic block list */ if (fgFirstBB) { fgLastBB->setNext(block); } else { fgFirstBB = block; block->bbPrev = nullptr; } fgLastBB = block; return block; } //------------------------------------------------------------------------ // fgEnsureFirstBBisScratch: Ensure that fgFirstBB is a scratch BasicBlock // // Returns: // Nothing. May allocate a new block and alter the value of fgFirstBB. // // Notes: // This should be called before adding on-entry initialization code to // the method, to ensure that fgFirstBB is not part of a loop. // // Does nothing, if fgFirstBB is already a scratch BB. After calling this, // fgFirstBB may already contain code. Callers have to be careful // that they do not mess up the order of things added to this block and // inadvertently change semantics. // // We maintain the invariant that a scratch BB ends with BBJ_NONE or // BBJ_ALWAYS, so that when adding independent bits of initialization, // callers can generally append to the fgFirstBB block without worring // about what code is there already. // // Can be called at any time, and can be called multiple times. // void Compiler::fgEnsureFirstBBisScratch() { // Have we already allocated a scratch block? if (fgFirstBBisScratch()) { return; } assert(fgFirstBBScratch == nullptr); BasicBlock* block = bbNewBasicBlock(BBJ_NONE); if (fgFirstBB != nullptr) { // If we have profile data the new block will inherit fgFirstBlock's weight if (fgFirstBB->hasProfileWeight()) { block->inheritWeight(fgFirstBB); } // The first block has an implicit ref count which we must // remove. Note the ref count could be greater that one, if // the first block is not scratch and is targeted by a // branch. 
assert(fgFirstBB->bbRefs >= 1); fgFirstBB->bbRefs--; // The new scratch bb will fall through to the old first bb fgAddRefPred(fgFirstBB, block); fgInsertBBbefore(fgFirstBB, block); } else { noway_assert(fgLastBB == nullptr); fgFirstBB = block; fgLastBB = block; } noway_assert(fgLastBB != nullptr); // Set the expected flags block->bbFlags |= (BBF_INTERNAL | BBF_IMPORTED); // This new first BB has an implicit ref, and no others. block->bbRefs = 1; fgFirstBBScratch = fgFirstBB; #ifdef DEBUG if (verbose) { printf("New scratch " FMT_BB "\n", block->bbNum); } #endif } //------------------------------------------------------------------------ // fgFirstBBisScratch: Check if fgFirstBB is a scratch block // // Returns: // true if fgFirstBB is a scratch block. // bool Compiler::fgFirstBBisScratch() { if (fgFirstBBScratch != nullptr) { assert(fgFirstBBScratch == fgFirstBB); assert(fgFirstBBScratch->bbFlags & BBF_INTERNAL); assert(fgFirstBBScratch->countOfInEdges() == 1); // Normally, the first scratch block is a fall-through block. However, if the block after it was an empty // BBJ_ALWAYS block, it might get removed, and the code that removes it will make the first scratch block // a BBJ_ALWAYS block. assert(fgFirstBBScratch->KindIs(BBJ_NONE, BBJ_ALWAYS)); return true; } else { return false; } } //------------------------------------------------------------------------ // fgBBisScratch: Check if a given block is a scratch block. // // Arguments: // block - block in question // // Returns: // true if this block is the first block and is a scratch block. // bool Compiler::fgBBisScratch(BasicBlock* block) { return fgFirstBBisScratch() && (block == fgFirstBB); } /* Removes a block from the return block list */ void Compiler::fgRemoveReturnBlock(BasicBlock* block) { if (fgReturnBlocks == nullptr) { return; } if (fgReturnBlocks->block == block) { // It's the 1st entry, assign new head of list. fgReturnBlocks = fgReturnBlocks->next; return; } for (BasicBlockList* retBlocks = fgReturnBlocks; retBlocks->next != nullptr; retBlocks = retBlocks->next) { if (retBlocks->next->block == block) { // Found it; splice it out. retBlocks->next = retBlocks->next->next; return; } } } /***************************************************************************** * fgChangeSwitchBlock: * * We have a BBJ_SWITCH jump at 'oldSwitchBlock' and we want to move this * switch jump over to 'newSwitchBlock'. All of the blocks that are jumped * to from jumpTab[] need to have their predecessor lists updated by removing * the 'oldSwitchBlock' and adding 'newSwitchBlock'. */ void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock) { noway_assert(oldSwitchBlock != nullptr); noway_assert(newSwitchBlock != nullptr); noway_assert(oldSwitchBlock->bbJumpKind == BBJ_SWITCH); // Walk the switch's jump table, updating the predecessor for each branch. for (BasicBlock* const bJump : oldSwitchBlock->SwitchTargets()) { noway_assert(bJump != nullptr); // Note that if there are duplicate branch targets in the switch jump table, // fgRemoveRefPred()/fgAddRefPred() will do the right thing: the second and // subsequent duplicates will simply subtract from and add to the duplicate // count (respectively). if (bJump->countOfInEdges() > 0) { // // Remove the old edge [oldSwitchBlock => bJump] // fgRemoveRefPred(bJump, oldSwitchBlock); } else { // bJump->countOfInEdges() must not be zero after preds are calculated. 
assert(!fgComputePredsDone); } // // Create the new edge [newSwitchBlock => bJump] // fgAddRefPred(bJump, newSwitchBlock); } if (m_switchDescMap != nullptr) { SwitchUniqueSuccSet uniqueSuccSet; // If already computed and cached the unique descriptors for the old block, let's // update those for the new block. if (m_switchDescMap->Lookup(oldSwitchBlock, &uniqueSuccSet)) { m_switchDescMap->Set(newSwitchBlock, uniqueSuccSet, BlockToSwitchDescMap::Overwrite); } else { fgInvalidateSwitchDescMapEntry(newSwitchBlock); } fgInvalidateSwitchDescMapEntry(oldSwitchBlock); } } //------------------------------------------------------------------------ // fgReplaceSwitchJumpTarget: update BBJ_SWITCH block so that all control // that previously flowed to oldTarget now flows to newTarget. // // Arguments: // blockSwitch - block ending in a switch // newTarget - new branch target // oldTarget - old branch target // // Notes: // Updates the jump table and the cached unique target set (if any). // Can be called before or after pred lists are built. // If pred lists are built, updates pred lists. // void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget) { noway_assert(blockSwitch != nullptr); noway_assert(newTarget != nullptr); noway_assert(oldTarget != nullptr); noway_assert(blockSwitch->bbJumpKind == BBJ_SWITCH); // For the jump targets values that match oldTarget of our BBJ_SWITCH // replace predecessor 'blockSwitch' with 'newTarget' // unsigned jumpCnt = blockSwitch->bbJumpSwt->bbsCount; BasicBlock** jumpTab = blockSwitch->bbJumpSwt->bbsDstTab; unsigned i = 0; // Walk the switch's jump table looking for blocks to update the preds for while (i < jumpCnt) { if (jumpTab[i] == oldTarget) // We will update when jumpTab[i] matches { // Remove the old edge [oldTarget from blockSwitch] // if (fgComputePredsDone) { fgRemoveAllRefPreds(oldTarget, blockSwitch); } // // Change the jumpTab entry to branch to the new location // jumpTab[i] = newTarget; // // Create the new edge [newTarget from blockSwitch] // flowList* newEdge = nullptr; if (fgComputePredsDone) { newEdge = fgAddRefPred(newTarget, blockSwitch); } // Now set the correct value of newEdge->flDupCount // and replace any other jumps in jumpTab[] that go to oldTarget. // i++; while (i < jumpCnt) { if (jumpTab[i] == oldTarget) { // // We also must update this entry in the jumpTab // jumpTab[i] = newTarget; newTarget->bbRefs++; // // Increment the flDupCount // if (fgComputePredsDone) { newEdge->flDupCount++; } } i++; // Check the next entry in jumpTab[] } // Maintain, if necessary, the set of unique targets of "block." UpdateSwitchTableTarget(blockSwitch, oldTarget, newTarget); return; // We have replaced the jumps to oldTarget with newTarget } i++; // Check the next entry in jumpTab[] for a match } noway_assert(!"Did not find oldTarget in jumpTab[]"); } //------------------------------------------------------------------------ // Compiler::fgReplaceJumpTarget: For a given block, replace the target 'oldTarget' with 'newTarget'. // // Arguments: // block - the block in which a jump target will be replaced. // newTarget - the new branch target of the block. // oldTarget - the old branch target of the block. // // Notes: // 1. Only branches are changed: BBJ_ALWAYS, the non-fallthrough path of BBJ_COND, BBJ_SWITCH, etc. // We ignore other block types. // 2. All branch targets found are updated. 
If there are multiple ways for a block // to reach 'oldTarget' (e.g., multiple arms of a switch), all of them are changed. // 3. The predecessor lists are not changed. // 4. If any switch table entry was updated, the switch table "unique successor" cache is invalidated. // // This function is most useful early, before the full predecessor lists have been computed. // void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget) { assert(block != nullptr); switch (block->bbJumpKind) { case BBJ_CALLFINALLY: case BBJ_COND: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: // This function will be called before import, so we still have BBJ_LEAVE if (block->bbJumpDest == oldTarget) { block->bbJumpDest = newTarget; } break; case BBJ_NONE: case BBJ_EHFINALLYRET: case BBJ_THROW: case BBJ_RETURN: break; case BBJ_SWITCH: { unsigned const jumpCnt = block->bbJumpSwt->bbsCount; BasicBlock** const jumpTab = block->bbJumpSwt->bbsDstTab; bool changed = false; for (unsigned i = 0; i < jumpCnt; i++) { if (jumpTab[i] == oldTarget) { jumpTab[i] = newTarget; changed = true; } } if (changed) { InvalidateUniqueSwitchSuccMap(); } break; } default: assert(!"Block doesn't have a valid bbJumpKind!!!!"); unreached(); break; } } //------------------------------------------------------------------------ // fgReplacePred: update the predecessor list, swapping one pred for another // // Arguments: // block - block with the pred list we want to update // oldPred - pred currently appearing in block's pred list // newPred - pred that will take oldPred's place. // // Notes: // // A block can only appear once in the preds list (for normal preds, not // cheap preds): if a predecessor has multiple ways to get to this block, then // flDupCount will be >1, but the block will still appear exactly once. Thus, this // function assumes that all branches from the predecessor (practically, that all // switch cases that target this block) are changed to branch from the new predecessor, // with the same dup count. // // Note that the block bbRefs is not changed, since 'block' has the same number of // references as before, just from a different predecessor block. // // Also note this may cause sorting of the pred list. // void Compiler::fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred) { noway_assert(block != nullptr); noway_assert(oldPred != nullptr); noway_assert(newPred != nullptr); assert(!fgCheapPredsValid); bool modified = false; for (flowList* const pred : block->PredEdges()) { if (oldPred == pred->getBlock()) { pred->setBlock(newPred); modified = true; break; } } // We may now need to reorder the pred list. // if (modified) { block->ensurePredListOrder(this); } } /***************************************************************************** * For a block that is in a handler region, find the first block of the most-nested * handler containing the block. */ BasicBlock* Compiler::fgFirstBlockOfHandler(BasicBlock* block) { assert(block->hasHndIndex()); return ehGetDsc(block->getHndIndex())->ebdHndBeg; } /***************************************************************************** * * The following helps find a basic block given its PC offset. 
*/ void Compiler::fgInitBBLookup() { BasicBlock** dscBBptr; /* Allocate the basic block table */ dscBBptr = fgBBs = new (this, CMK_BasicBlock) BasicBlock*[fgBBcount]; /* Walk all the basic blocks, filling in the table */ for (BasicBlock* const block : Blocks()) { *dscBBptr++ = block; } noway_assert(dscBBptr == fgBBs + fgBBcount); } BasicBlock* Compiler::fgLookupBB(unsigned addr) { unsigned lo; unsigned hi; /* Do a binary search */ for (lo = 0, hi = fgBBcount - 1;;) { AGAIN:; if (lo > hi) { break; } unsigned mid = (lo + hi) / 2; BasicBlock* dsc = fgBBs[mid]; // We introduce internal blocks for BBJ_CALLFINALLY. Skip over these. while (dsc->bbFlags & BBF_INTERNAL) { dsc = dsc->bbNext; mid++; // We skipped over too many, Set hi back to the original mid - 1 if (mid > hi) { mid = (lo + hi) / 2; hi = mid - 1; goto AGAIN; } } unsigned pos = dsc->bbCodeOffs; if (pos < addr) { if ((lo == hi) && (lo == (fgBBcount - 1))) { noway_assert(addr == dsc->bbCodeOffsEnd); return nullptr; // NULL means the end of method } lo = mid + 1; continue; } if (pos > addr) { hi = mid - 1; continue; } return dsc; } #ifdef DEBUG printf("ERROR: Couldn't find basic block at offset %04X\n", addr); #endif // DEBUG NO_WAY("fgLookupBB failed."); } //------------------------------------------------------------------------ // FgStack: simple stack model for the inlinee's evaluation stack. // // Model the inputs available to various operations in the inline body. // Tracks constants, arguments, array lengths. class FgStack { public: FgStack() : slot0(SLOT_INVALID), slot1(SLOT_INVALID), depth(0) { // Empty } enum FgSlot { SLOT_INVALID = UINT_MAX, SLOT_UNKNOWN = 0, SLOT_CONSTANT = 1, SLOT_ARRAYLEN = 2, SLOT_ARGUMENT = 3 }; void Clear() { depth = 0; } void PushUnknown() { Push(SLOT_UNKNOWN); } void PushConstant() { Push(SLOT_CONSTANT); } void PushArrayLen() { Push(SLOT_ARRAYLEN); } void PushArgument(unsigned arg) { Push((FgSlot)(SLOT_ARGUMENT + arg)); } FgSlot GetSlot0() const { return depth >= 1 ? slot0 : FgSlot::SLOT_UNKNOWN; } FgSlot GetSlot1() const { return depth >= 2 ? slot1 : FgSlot::SLOT_UNKNOWN; } FgSlot Top(const int n = 0) { if (n == 0) { return depth >= 1 ? slot0 : SLOT_UNKNOWN; } if (n == 1) { return depth == 2 ? 
slot1 : SLOT_UNKNOWN; } unreached(); } static bool IsConstant(FgSlot value) { return value == SLOT_CONSTANT; } static bool IsConstantOrConstArg(FgSlot value, InlineInfo* info) { return IsConstant(value) || IsConstArgument(value, info); } static bool IsArrayLen(FgSlot value) { return value == SLOT_ARRAYLEN; } static bool IsArgument(FgSlot value) { return value >= SLOT_ARGUMENT; } static bool IsConstArgument(FgSlot value, InlineInfo* info) { if ((info == nullptr) || !IsArgument(value)) { return false; } const unsigned argNum = value - SLOT_ARGUMENT; if (argNum < info->argCnt) { return info->inlArgInfo[argNum].argIsInvariant; } return false; } static bool IsExactArgument(FgSlot value, InlineInfo* info) { if ((info == nullptr) || !IsArgument(value)) { return false; } const unsigned argNum = value - SLOT_ARGUMENT; if (argNum < info->argCnt) { return info->inlArgInfo[argNum].argIsExact; } return false; } static unsigned SlotTypeToArgNum(FgSlot value) { assert(IsArgument(value)); return value - SLOT_ARGUMENT; } bool IsStackTwoDeep() const { return depth == 2; } bool IsStackOneDeep() const { return depth == 1; } bool IsStackAtLeastOneDeep() const { return depth >= 1; } void Push(FgSlot slot) { assert(depth <= 2); slot1 = slot0; slot0 = slot; if (depth < 2) { depth++; } } private: FgSlot slot0; FgSlot slot1; unsigned depth; }; void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget) { const BYTE* codeBegp = codeAddr; const BYTE* codeEndp = codeAddr + codeSize; unsigned varNum; var_types varType = DUMMY_INIT(TYP_UNDEF); // TYP_ type typeInfo ti; // Verifier type. bool typeIsNormed = false; FgStack pushedStack; const bool isForceInline = (info.compFlags & CORINFO_FLG_FORCEINLINE) != 0; const bool makeInlineObservations = (compInlineResult != nullptr); const bool isInlining = compIsForInlining(); unsigned retBlocks = 0; int prefixFlags = 0; bool preciseScan = makeInlineObservations && compInlineResult->GetPolicy()->RequiresPreciseScan(); const bool resolveTokens = preciseScan; // Track offsets where IL instructions begin in DEBUG builds. Used to // validate debug info generated by the JIT. assert(codeSize == compInlineContext->GetILSize()); INDEBUG(FixedBitVect* ilInstsSet = FixedBitVect::bitVectInit(codeSize, this)); if (makeInlineObservations) { // Set default values for profile (to avoid NoteFailed in CALLEE_IL_CODE_SIZE's handler) // these will be overridden later. compInlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, true); compInlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, 1.0); // Observe force inline state and code size. compInlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, isForceInline); compInlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize); // Determine if call site is within a try. if (isInlining && impInlineInfo->iciBlock->hasTryIndex()) { compInlineResult->Note(InlineObservation::CALLSITE_IN_TRY_REGION); } // Determine if the call site is in a no-return block if (isInlining && (impInlineInfo->iciBlock->bbJumpKind == BBJ_THROW)) { compInlineResult->Note(InlineObservation::CALLSITE_IN_NORETURN_REGION); } // Determine if the call site is in a loop. if (isInlining && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) { compInlineResult->Note(InlineObservation::CALLSITE_IN_LOOP); } #ifdef DEBUG // If inlining, this method should still be a candidate. 
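// Usage sketch (hypothetical example): how the two-deep FgStack above models the IL
// sequence "ldarg.0; ldc.i4 5; ceq" during this prescan.
//
//   FgStack stack;
//   stack.PushArgument(0); // slot0 becomes SLOT_ARGUMENT + 0
//   stack.PushConstant();  // slot0 = constant, slot1 = argument 0
//   bool const foldable =
//       FgStack::IsConstant(stack.Top(0)) && FgStack::IsArgument(stack.Top(1));
//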
if (isInlining) { assert(compInlineResult->IsCandidate()); } #endif // DEBUG // note that we're starting to look at the opcodes. compInlineResult->Note(InlineObservation::CALLEE_BEGIN_OPCODE_SCAN); } CORINFO_RESOLVED_TOKEN resolvedToken; OPCODE opcode = CEE_NOP; OPCODE prevOpcode = CEE_NOP; bool handled = false; while (codeAddr < codeEndp) { prevOpcode = opcode; opcode = (OPCODE)getU1LittleEndian(codeAddr); INDEBUG(ilInstsSet->bitVectSet((UINT)(codeAddr - codeBegp))); codeAddr += sizeof(__int8); if (!handled && preciseScan) { // Push something unknown to the stack since we couldn't find anything useful for inlining pushedStack.PushUnknown(); } handled = false; DECODE_OPCODE: if ((unsigned)opcode >= CEE_COUNT) { BADCODE3("Illegal opcode", ": %02X", (int)opcode); } if ((opcode >= CEE_LDARG_0 && opcode <= CEE_STLOC_S) || (opcode >= CEE_LDARG && opcode <= CEE_STLOC)) { opts.lvRefCount++; } if (makeInlineObservations && (opcode >= CEE_LDNULL) && (opcode <= CEE_LDC_R8)) { // LDTOKEN and LDSTR are handled below pushedStack.PushConstant(); handled = true; } unsigned sz = opcodeSizes[opcode]; switch (opcode) { case CEE_PREFIX1: { if (codeAddr >= codeEndp) { goto TOO_FAR; } opcode = (OPCODE)(256 + getU1LittleEndian(codeAddr)); codeAddr += sizeof(__int8); goto DECODE_OPCODE; } case CEE_PREFIX2: case CEE_PREFIX3: case CEE_PREFIX4: case CEE_PREFIX5: case CEE_PREFIX6: case CEE_PREFIX7: case CEE_PREFIXREF: { BADCODE3("Illegal opcode", ": %02X", (int)opcode); } case CEE_SIZEOF: case CEE_LDTOKEN: case CEE_LDSTR: { if (preciseScan) { pushedStack.PushConstant(); handled = true; } break; } case CEE_DUP: { if (preciseScan) { pushedStack.Push(pushedStack.Top()); handled = true; } break; } case CEE_THROW: { if (makeInlineObservations) { compInlineResult->Note(InlineObservation::CALLEE_THROW_BLOCK); } break; } case CEE_BOX: { if (makeInlineObservations) { int toSkip = impBoxPatternMatch(nullptr, codeAddr + sz, codeEndp, true); if (toSkip > 0) { // toSkip > 0 means we most likely will hit a pattern (e.g. box+isinst+brtrue) that // will be folded into a const if (preciseScan) { codeAddr += toSkip; } } } break; } case CEE_CASTCLASS: case CEE_ISINST: { if (makeInlineObservations) { FgStack::FgSlot slot = pushedStack.Top(); if (FgStack::IsConstantOrConstArg(slot, impInlineInfo) || FgStack::IsExactArgument(slot, impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_EXPR_UN); handled = true; // and keep argument in the pushedStack } else if (FgStack::IsArgument(slot)) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CAST); handled = true; // and keep argument in the pushedStack } } break; } case CEE_CALL: case CEE_CALLVIRT: { // There has to be code after the call, otherwise the inlinee is unverifiable. 
if (isInlining) { noway_assert(codeAddr < codeEndp - sz); } if (!makeInlineObservations) { break; } CORINFO_METHOD_HANDLE methodHnd = nullptr; bool isIntrinsic = false; NamedIntrinsic ni = NI_Illegal; if (resolveTokens) { impResolveToken(codeAddr, &resolvedToken, CORINFO_TOKENKIND_Method); methodHnd = resolvedToken.hMethod; isIntrinsic = eeIsIntrinsic(methodHnd); } if (isIntrinsic) { ni = lookupNamedIntrinsic(methodHnd); bool foldableIntrinsic = false; if (IsMathIntrinsic(ni)) { // Most Math(F) intrinsics have single arguments foldableIntrinsic = FgStack::IsConstantOrConstArg(pushedStack.Top(), impInlineInfo); } else { switch (ni) { // These are most likely foldable without arguments case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: case NI_System_Enum_HasFlag: case NI_System_GC_KeepAlive: { pushedStack.PushUnknown(); foldableIntrinsic = true; break; } case NI_System_Span_get_Item: case NI_System_ReadOnlySpan_get_Item: { if (FgStack::IsArgument(pushedStack.Top(0)) || FgStack::IsArgument(pushedStack.Top(1))) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK); } break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant: if (FgStack::IsConstArgument(pushedStack.Top(), impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLEE_CONST_ARG_FEEDS_ISCONST); } else { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_ISCONST); } // RuntimeHelpers.IsKnownConstant is always folded into a const pushedStack.PushConstant(); foldableIntrinsic = true; break; // These are foldable if the first argument is a constant case NI_System_Type_get_IsValueType: case NI_System_Type_GetTypeFromHandle: case NI_System_String_get_Length: case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness: case NI_System_Numerics_BitOperations_PopCount: #if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS) case NI_Vector128_Create: case NI_Vector256_Create: #elif defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS) case NI_Vector64_Create: case NI_Vector128_Create: #endif { // Top() in order to keep it as is in case of foldableIntrinsic if (FgStack::IsConstantOrConstArg(pushedStack.Top(), impInlineInfo)) { foldableIntrinsic = true; } break; } // These are foldable if two arguments are constants case NI_System_Type_op_Equality: case NI_System_Type_op_Inequality: case NI_System_String_get_Chars: case NI_System_Type_IsAssignableTo: case NI_System_Type_IsAssignableFrom: { if (FgStack::IsConstantOrConstArg(pushedStack.Top(0), impInlineInfo) && FgStack::IsConstantOrConstArg(pushedStack.Top(1), impInlineInfo)) { foldableIntrinsic = true; pushedStack.PushConstant(); } break; } case NI_IsSupported_True: case NI_IsSupported_False: { foldableIntrinsic = true; pushedStack.PushConstant(); break; } #if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS) case NI_Vector128_get_Count: case NI_Vector256_get_Count: foldableIntrinsic = true; pushedStack.PushConstant(); // TODO: check if it's a loop condition - we unroll such loops. 
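// Vector128<T>.Count / Vector256<T>.Count are JIT-time constants for a given
// ISA, so they are modeled as SLOT_CONSTANT on the abstract stack.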
break; case NI_Vector256_get_Zero: case NI_Vector256_get_AllBitsSet: foldableIntrinsic = true; pushedStack.PushUnknown(); break; #elif defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS) case NI_Vector64_get_Count: case NI_Vector128_get_Count: foldableIntrinsic = true; pushedStack.PushConstant(); break; case NI_Vector128_get_Zero: case NI_Vector128_get_AllBitsSet: foldableIntrinsic = true; pushedStack.PushUnknown(); break; #endif default: { break; } } } if (foldableIntrinsic) { compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_INTRINSIC); handled = true; } else if (ni != NI_Illegal) { // Otherwise note "intrinsic" (most likely will be lowered as single instructions) // except Math where only a few intrinsics won't end up as normal calls if (!IsMathIntrinsic(ni) || IsTargetIntrinsic(ni)) { compInlineResult->Note(InlineObservation::CALLEE_INTRINSIC); } } } if ((codeAddr < codeEndp - sz) && (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET) { // If the method has a call followed by a ret, assume that // it is a wrapper method. compInlineResult->Note(InlineObservation::CALLEE_LOOKS_LIKE_WRAPPER); } if (!isIntrinsic && !handled && FgStack::IsArgument(pushedStack.Top())) { // Optimistically assume that "call(arg)" returns something arg-dependent. // However, we don't know how many args it expects and its return type. handled = true; } } break; case CEE_LDIND_I1: case CEE_LDIND_U1: case CEE_LDIND_I2: case CEE_LDIND_U2: case CEE_LDIND_I4: case CEE_LDIND_U4: case CEE_LDIND_I8: case CEE_LDIND_I: case CEE_LDIND_R4: case CEE_LDIND_R8: case CEE_LDIND_REF: { if (FgStack::IsArgument(pushedStack.Top())) { handled = true; } break; } // Unary operators: case CEE_CONV_I: case CEE_CONV_U: case CEE_CONV_I1: case CEE_CONV_I2: case CEE_CONV_I4: case CEE_CONV_I8: case CEE_CONV_R4: case CEE_CONV_R8: case CEE_CONV_U4: case CEE_CONV_U8: case CEE_CONV_U2: case CEE_CONV_U1: case CEE_CONV_R_UN: case CEE_CONV_OVF_I: case CEE_CONV_OVF_U: case CEE_CONV_OVF_I1: case CEE_CONV_OVF_U1: case CEE_CONV_OVF_I2: case CEE_CONV_OVF_U2: case CEE_CONV_OVF_I4: case CEE_CONV_OVF_U4: case CEE_CONV_OVF_I8: case CEE_CONV_OVF_U8: case CEE_CONV_OVF_I_UN: case CEE_CONV_OVF_U_UN: case CEE_CONV_OVF_I1_UN: case CEE_CONV_OVF_I2_UN: case CEE_CONV_OVF_I4_UN: case CEE_CONV_OVF_I8_UN: case CEE_CONV_OVF_U1_UN: case CEE_CONV_OVF_U2_UN: case CEE_CONV_OVF_U4_UN: case CEE_CONV_OVF_U8_UN: case CEE_NOT: case CEE_NEG: { if (makeInlineObservations) { FgStack::FgSlot arg = pushedStack.Top(); if (FgStack::IsConstArgument(arg, impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_EXPR_UN); handled = true; } else if (FgStack::IsArgument(arg) || FgStack::IsConstant(arg)) { handled = true; } } break; } // Binary operators: case CEE_ADD: case CEE_SUB: case CEE_MUL: case CEE_DIV: case CEE_DIV_UN: case CEE_REM: case CEE_REM_UN: case CEE_AND: case CEE_OR: case CEE_XOR: case CEE_SHL: case CEE_SHR: case CEE_SHR_UN: case CEE_ADD_OVF: case CEE_ADD_OVF_UN: case CEE_MUL_OVF: case CEE_MUL_OVF_UN: case CEE_SUB_OVF: case CEE_SUB_OVF_UN: case CEE_CEQ: case CEE_CGT: case CEE_CGT_UN: case CEE_CLT: case CEE_CLT_UN: { if (!makeInlineObservations) { break; } if (!preciseScan) { switch (opcode) { case CEE_CEQ: case CEE_CGT: case CEE_CGT_UN: case CEE_CLT: case CEE_CLT_UN: fgObserveInlineConstants(opcode, pushedStack, isInlining); break; default: break; } } else { FgStack::FgSlot arg0 = pushedStack.Top(1); FgStack::FgSlot arg1 = pushedStack.Top(0); // Const op ConstArg -> ConstArg if (FgStack::IsConstant(arg0) && FgStack::IsConstArgument(arg1, 
impInlineInfo)) { // keep stack unchanged handled = true; compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_EXPR); } // ConstArg op Const -> ConstArg // ConstArg op ConstArg -> ConstArg else if (FgStack::IsConstArgument(arg0, impInlineInfo) && FgStack::IsConstantOrConstArg(arg1, impInlineInfo)) { if (FgStack::IsConstant(arg1)) { pushedStack.Push(arg0); } handled = true; compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_EXPR); } // Const op Const -> Const else if (FgStack::IsConstant(arg0) && FgStack::IsConstant(arg1)) { // both are constants, but we're mostly interested in cases where a const arg leads to // a foldable expression. handled = true; } // Arg op ConstArg // Arg op Const else if (FgStack::IsArgument(arg0) && FgStack::IsConstantOrConstArg(arg1, impInlineInfo)) { // "Arg op CNS" --> keep arg0 in the stack for the next ops pushedStack.Push(arg0); handled = true; compInlineResult->Note(InlineObservation::CALLEE_BINARY_EXRP_WITH_CNS); } // ConstArg op Arg // Const op Arg else if (FgStack::IsArgument(arg1) && FgStack::IsConstantOrConstArg(arg0, impInlineInfo)) { // "CNS op ARG" --> keep arg1 in the stack for the next ops handled = true; compInlineResult->Note(InlineObservation::CALLEE_BINARY_EXRP_WITH_CNS); } // X / ConstArg // X % ConstArg if (FgStack::IsConstArgument(arg1, impInlineInfo)) { if ((opcode == CEE_DIV) || (opcode == CEE_DIV_UN) || (opcode == CEE_REM) || (opcode == CEE_REM_UN)) { compInlineResult->Note(InlineObservation::CALLSITE_DIV_BY_CNS); } pushedStack.Push(arg0); handled = true; } } break; } // Jumps case CEE_LEAVE: case CEE_LEAVE_S: case CEE_BR: case CEE_BR_S: case CEE_BRFALSE: case CEE_BRFALSE_S: case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BEQ: case CEE_BEQ_S: case CEE_BGE: case CEE_BGE_S: case CEE_BGE_UN: case CEE_BGE_UN_S: case CEE_BGT: case CEE_BGT_S: case CEE_BGT_UN: case CEE_BGT_UN_S: case CEE_BLE: case CEE_BLE_S: case CEE_BLE_UN: case CEE_BLE_UN_S: case CEE_BLT: case CEE_BLT_S: case CEE_BLT_UN: case CEE_BLT_UN_S: case CEE_BNE_UN: case CEE_BNE_UN_S: { if (codeAddr > codeEndp - sz) { goto TOO_FAR; } // Compute jump target address signed jmpDist = (sz == 1) ? 
getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0 && (opcode == CEE_LEAVE || opcode == CEE_LEAVE_S || opcode == CEE_BR || opcode == CEE_BR_S)) { break; /* NOP */ } unsigned jmpAddr = (IL_OFFSET)(codeAddr - codeBegp) + sz + jmpDist; // Make sure target is reasonable if (jmpAddr >= codeSize) { BADCODE3("code jumps to outer space", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } if (makeInlineObservations && (jmpDist < 0)) { compInlineResult->Note(InlineObservation::CALLEE_BACKWARD_JUMP); } // Mark the jump target jumpTarget->bitVectSet(jmpAddr); // See if jump might be sensitive to inlining if (!preciseScan && makeInlineObservations && (opcode != CEE_BR_S) && (opcode != CEE_BR)) { fgObserveInlineConstants(opcode, pushedStack, isInlining); } else if (preciseScan && makeInlineObservations) { switch (opcode) { // Binary case CEE_BEQ: case CEE_BGE: case CEE_BGT: case CEE_BLE: case CEE_BLT: case CEE_BNE_UN: case CEE_BGE_UN: case CEE_BGT_UN: case CEE_BLE_UN: case CEE_BLT_UN: case CEE_BEQ_S: case CEE_BGE_S: case CEE_BGT_S: case CEE_BLE_S: case CEE_BLT_S: case CEE_BNE_UN_S: case CEE_BGE_UN_S: case CEE_BGT_UN_S: case CEE_BLE_UN_S: case CEE_BLT_UN_S: { FgStack::FgSlot op1 = pushedStack.Top(1); FgStack::FgSlot op2 = pushedStack.Top(0); if (FgStack::IsConstantOrConstArg(op1, impInlineInfo) && FgStack::IsConstantOrConstArg(op2, impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_BRANCH); } if (FgStack::IsConstArgument(op1, impInlineInfo) || FgStack::IsConstArgument(op2, impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST); } if ((FgStack::IsArgument(op1) && FgStack::IsArrayLen(op2)) || (FgStack::IsArgument(op2) && FgStack::IsArrayLen(op1))) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK); } else if ((FgStack::IsArgument(op1) && FgStack::IsConstantOrConstArg(op2, impInlineInfo)) || (FgStack::IsArgument(op2) && FgStack::IsConstantOrConstArg(op1, impInlineInfo))) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST); } else if (FgStack::IsArgument(op1) || FgStack::IsArgument(op2)) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_TEST); } else if (FgStack::IsConstant(op1) || FgStack::IsConstant(op2)) { compInlineResult->Note(InlineObservation::CALLEE_BINARY_EXRP_WITH_CNS); } break; } // Unary case CEE_BRFALSE_S: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRTRUE: { if (FgStack::IsConstantOrConstArg(pushedStack.Top(), impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_BRANCH); } else if (FgStack::IsArgument(pushedStack.Top())) { // E.g. 
brtrue is basically "if (X == 0)" compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST); } break; } default: break; } } } break; case CEE_LDFLDA: case CEE_LDFLD: case CEE_STFLD: { if (FgStack::IsArgument(pushedStack.Top())) { compInlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT_FIELD_ACCESS); handled = true; // keep argument on top of the stack } break; } case CEE_LDELEM_I1: case CEE_LDELEM_U1: case CEE_LDELEM_I2: case CEE_LDELEM_U2: case CEE_LDELEM_I4: case CEE_LDELEM_U4: case CEE_LDELEM_I8: case CEE_LDELEM_I: case CEE_LDELEM_R4: case CEE_LDELEM_R8: case CEE_LDELEM_REF: case CEE_STELEM_I: case CEE_STELEM_I1: case CEE_STELEM_I2: case CEE_STELEM_I4: case CEE_STELEM_I8: case CEE_STELEM_R4: case CEE_STELEM_R8: case CEE_STELEM_REF: case CEE_LDELEM: case CEE_STELEM: { if (!preciseScan) { break; } if (FgStack::IsArgument(pushedStack.Top()) || FgStack::IsArgument(pushedStack.Top(1))) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK); } break; } case CEE_SWITCH: { if (makeInlineObservations) { compInlineResult->Note(InlineObservation::CALLEE_HAS_SWITCH); if (FgStack::IsConstantOrConstArg(pushedStack.Top(), impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_SWITCH); } // Fail fast, if we're inlining and can't handle this. if (isInlining && compInlineResult->IsFailure()) { return; } } // Make sure we don't go past the end reading the number of cases if (codeAddr > codeEndp - sizeof(DWORD)) { goto TOO_FAR; } // Read the number of cases unsigned jmpCnt = getU4LittleEndian(codeAddr); codeAddr += sizeof(DWORD); if (jmpCnt > codeSize / sizeof(DWORD)) { goto TOO_FAR; } // Find the end of the switch table unsigned jmpBase = (unsigned)((codeAddr - codeBegp) + jmpCnt * sizeof(DWORD)); // Make sure there is more code after the switch if (jmpBase >= codeSize) { goto TOO_FAR; } // jmpBase is also the target of the default case, so mark it jumpTarget->bitVectSet(jmpBase); // Process table entries while (jmpCnt > 0) { unsigned jmpAddr = jmpBase + getI4LittleEndian(codeAddr); codeAddr += 4; if (jmpAddr >= codeSize) { BADCODE3("jump target out of range", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } jumpTarget->bitVectSet(jmpAddr); jmpCnt--; } // We've advanced past all the bytes in this instruction sz = 0; } break; case CEE_UNALIGNED: { noway_assert(sz == sizeof(__int8)); prefixFlags |= PREFIX_UNALIGNED; codeAddr += sizeof(__int8); impValidateMemoryAccessOpcode(codeAddr, codeEndp, false); handled = true; goto OBSERVE_OPCODE; } case CEE_CONSTRAINED: { noway_assert(sz == sizeof(unsigned)); prefixFlags |= PREFIX_CONSTRAINED; codeAddr += sizeof(unsigned); { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN) { BADCODE("constrained. has to be followed by callvirt, call or ldftn"); } } handled = true; goto OBSERVE_OPCODE; } case CEE_READONLY: { noway_assert(sz == 0); prefixFlags |= PREFIX_READONLY; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if ((actualOpcode != CEE_LDELEMA) && !impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("readonly. 
has to be followed by ldelema or call"); } } handled = true; goto OBSERVE_OPCODE; } case CEE_VOLATILE: { noway_assert(sz == 0); prefixFlags |= PREFIX_VOLATILE; impValidateMemoryAccessOpcode(codeAddr, codeEndp, true); handled = true; goto OBSERVE_OPCODE; } case CEE_TAILCALL: { noway_assert(sz == 0); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("tailcall. has to be followed by call, callvirt or calli"); } } handled = true; goto OBSERVE_OPCODE; } case CEE_STARG: case CEE_STARG_S: { noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD)); if (codeAddr > codeEndp - sz) { goto TOO_FAR; } varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); if (isInlining) { if (varNum < impInlineInfo->argCnt) { impInlineInfo->inlArgInfo[varNum].argHasStargOp = true; } } else { // account for possible hidden param varNum = compMapILargNum(varNum); // This check is only intended to prevent an AV. Bad varNum values will later // be handled properly by the verifier. if (varNum < lvaTableCnt) { // In non-inline cases, note written-to arguments. lvaTable[varNum].lvHasILStoreOp = 1; } } } break; case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: varNum = (opcode - CEE_STLOC_0); goto STLOC; case CEE_STLOC: case CEE_STLOC_S: { noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD)); if (codeAddr > codeEndp - sz) { goto TOO_FAR; } varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); STLOC: if (isInlining) { InlLclVarInfo& lclInfo = impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt]; if (lclInfo.lclHasStlocOp) { lclInfo.lclHasMultipleStlocOp = 1; } else { lclInfo.lclHasStlocOp = 1; } } else { varNum += info.compArgsCount; // This check is only intended to prevent an AV. Bad varNum values will later // be handled properly by the verifier. if (varNum < lvaTableCnt) { // In non-inline cases, note written-to locals. if (lvaTable[varNum].lvHasILStoreOp) { lvaTable[varNum].lvHasMultipleILStoreOp = 1; } else { lvaTable[varNum].lvHasILStoreOp = 1; } } } } break; case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: // if (preciseScan && makeInlineObservations && (prevOpcode == (CEE_STLOC_3 - (CEE_LDLOC_3 - opcode)))) { // Fold stloc+ldloc pushedStack.Push(pushedStack.Top(1)); // throw away SLOT_UNKNOWN inserted by STLOC handled = true; } break; case CEE_LDARGA: case CEE_LDARGA_S: case CEE_LDLOCA: case CEE_LDLOCA_S: { // Handle address-taken args or locals noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD)); if (codeAddr > codeEndp - sz) { goto TOO_FAR; } varNum = (sz == sizeof(BYTE)) ? 
getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); if (isInlining) { if (opcode == CEE_LDLOCA || opcode == CEE_LDLOCA_S) { varType = impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt].lclTypeInfo; ti = impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt].lclVerTypeInfo; impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt].lclHasLdlocaOp = true; } else { noway_assert(opcode == CEE_LDARGA || opcode == CEE_LDARGA_S); varType = impInlineInfo->lclVarInfo[varNum].lclTypeInfo; ti = impInlineInfo->lclVarInfo[varNum].lclVerTypeInfo; impInlineInfo->inlArgInfo[varNum].argHasLdargaOp = true; pushedStack.PushArgument(varNum); handled = true; } } else { if (opcode == CEE_LDLOCA || opcode == CEE_LDLOCA_S) { if (varNum >= info.compMethodInfo->locals.numArgs) { BADCODE("bad local number"); } varNum += info.compArgsCount; } else { noway_assert(opcode == CEE_LDARGA || opcode == CEE_LDARGA_S); if (varNum >= info.compILargsCount) { BADCODE("bad argument number"); } varNum = compMapILargNum(varNum); // account for possible hidden param } varType = (var_types)lvaTable[varNum].lvType; ti = lvaTable[varNum].lvVerTypeInfo; // Determine if the next instruction will consume // the address. If so we won't mark this var as // address taken. // // We will put structs on the stack and changing // the addrTaken of a local requires an extra pass // in the morpher so we won't apply this // optimization to structs. // // Debug code spills for every IL instruction, and // therefore it will split statements, so we will // need the address. Note that this optimization // is based in that we know what trees we will // generate for this ldfld, and we require that we // won't need the address of this local at all const bool notStruct = !varTypeIsStruct(lvaGetDesc(varNum)); const bool notLastInstr = (codeAddr < codeEndp - sz); const bool notDebugCode = !opts.compDbgCode; if (notStruct && notLastInstr && notDebugCode && impILConsumesAddr(codeAddr + sz)) { // We can skip the addrtaken, as next IL instruction consumes // the address. } else { lvaTable[varNum].lvHasLdAddrOp = 1; if (!info.compIsStatic && (varNum == 0)) { // Addr taken on "this" pointer is significant, // go ahead to mark it as permanently addr-exposed here. // This may be conservative, but probably not very. lvaSetVarAddrExposed(0 DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); } } } // isInlining typeIsNormed = ti.IsValueClass() && !varTypeIsStruct(varType); } break; case CEE_JMP: retBlocks++; #if !defined(TARGET_X86) && !defined(TARGET_ARM) if (!isInlining) { // We transform this into a set of ldarg's + tail call and // thus may push more onto the stack than originally thought. // This doesn't interfere with verification because CEE_JMP // is never verifiable, and there's nothing unsafe you can // do with a an IL stack overflow if the JIT is expecting it. info.compMaxStack = max(info.compMaxStack, info.compILargsCount); break; } #endif // !TARGET_X86 && !TARGET_ARM // If we are inlining, we need to fail for a CEE_JMP opcode, just like // the list of other opcodes (for all platforms). FALLTHROUGH; case CEE_MKREFANY: case CEE_RETHROW: if (makeInlineObservations) { // Arguably this should be NoteFatal, but the legacy behavior is // to ignore this for the prejit root. compInlineResult->Note(InlineObservation::CALLEE_UNSUPPORTED_OPCODE); // Fail fast if we're inlining... 
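// (CALLEE_UNSUPPORTED_OPCODE is fatal for an inline attempt, so IsFailure()
// is expected to hold here.)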
if (isInlining) { assert(compInlineResult->IsFailure()); return; } } break; case CEE_LOCALLOC: compLocallocSeen = true; // We now allow localloc callees to become candidates in some cases. if (makeInlineObservations) { compInlineResult->Note(InlineObservation::CALLEE_HAS_LOCALLOC); if (isInlining && compInlineResult->IsFailure()) { return; } } break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: if (makeInlineObservations) { pushedStack.PushArgument(opcode - CEE_LDARG_0); handled = true; } break; case CEE_LDARG_S: case CEE_LDARG: { if (codeAddr > codeEndp - sz) { goto TOO_FAR; } varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); if (makeInlineObservations) { pushedStack.PushArgument(varNum); handled = true; } } break; case CEE_LDLEN: if (makeInlineObservations) { pushedStack.PushArrayLen(); handled = true; } break; case CEE_RET: retBlocks++; break; default: break; } // Skip any remaining operands this opcode may have codeAddr += sz; // Clear any prefix flags that may have been set prefixFlags = 0; // Increment the number of observed instructions opts.instrCount++; OBSERVE_OPCODE: // Note the opcode we just saw if (makeInlineObservations) { InlineObservation obs = typeIsNormed ? InlineObservation::CALLEE_OPCODE_NORMED : InlineObservation::CALLEE_OPCODE; compInlineResult->NoteInt(obs, opcode); } typeIsNormed = false; } if (codeAddr != codeEndp) { TOO_FAR: BADCODE3("Code ends in the middle of an opcode, or there is a branch past the end of the method", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } INDEBUG(compInlineContext->SetILInstsSet(ilInstsSet)); if (makeInlineObservations) { compInlineResult->Note(InlineObservation::CALLEE_END_OPCODE_SCAN); // If there are no return blocks we know it does not return, however if there // return blocks we don't know it returns as it may be counting unreachable code. // However we will still make the CALLEE_DOES_NOT_RETURN observation. compInlineResult->NoteBool(InlineObservation::CALLEE_DOES_NOT_RETURN, retBlocks == 0); if (retBlocks == 0 && isInlining) { // Mark the call node as "no return" as it can impact caller's code quality. impInlineInfo->iciCall->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; // Mark root method as containing a noreturn call. impInlineRoot()->setMethodHasNoReturnCalls(); } // If the inline is viable and discretionary, do the // profitability screening. if (compInlineResult->IsDiscretionaryCandidate()) { // Make some callsite specific observations that will feed // into the profitability model. impMakeDiscretionaryInlineObservations(impInlineInfo, compInlineResult); // None of those observations should have changed the // inline's viability. assert(compInlineResult->IsCandidate()); if (isInlining) { // Assess profitability... CORINFO_METHOD_INFO* methodInfo = &impInlineInfo->inlineCandidateInfo->methInfo; compInlineResult->DetermineProfitability(methodInfo); if (compInlineResult->IsFailure()) { impInlineRoot()->m_inlineStrategy->NoteUnprofitable(); JITDUMP("\n\nInline expansion aborted, inline not profitable\n"); return; } else { // The inline is still viable. assert(compInlineResult->IsCandidate()); } } else { // Prejit root case. Profitability assessment for this // is done over in compCompileHelper. } } } // None of the local vars in the inlinee should have address taken or been written to. // Therefore we should NOT need to enter this "if" statement. 
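// (The adjustment below only applies to root compilations of instance
// methods; inlinees and static methods skip it.)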
if (!isInlining && !info.compIsStatic) { fgAdjustForAddressExposedOrWrittenThis(); } // Now that we've seen the IL, set lvSingleDef for root method // locals. // // We could also do this for root method arguments but single-def // arguments are set by the caller and so we don't know anything // about the possible values or types. // // For inlinees we do this over in impInlineFetchLocal and // impInlineFetchArg (here args are included as we somtimes get // new information about the types of inlinee args). if (!isInlining) { const unsigned firstLcl = info.compArgsCount; const unsigned lastLcl = firstLcl + info.compMethodInfo->locals.numArgs; for (unsigned lclNum = firstLcl; lclNum < lastLcl; lclNum++) { LclVarDsc* lclDsc = lvaGetDesc(lclNum); assert(lclDsc->lvSingleDef == 0); // could restrict this to TYP_REF lclDsc->lvSingleDef = !lclDsc->lvHasMultipleILStoreOp && !lclDsc->lvHasLdAddrOp; if (lclDsc->lvSingleDef) { JITDUMP("Marked V%02u as a single def local\n", lclNum); } } } } //------------------------------------------------------------------------ // fgAdjustForAddressExposedOrWrittenThis: update var table for cases // where the this pointer value can change. // // Notes: // Modifies lvaArg0Var to refer to a temp if the value of 'this' can // change. The original this (info.compThisArg) then remains // unmodified in the method. fgAddInternal is reponsible for // adding the code to copy the initial this into the temp. void Compiler::fgAdjustForAddressExposedOrWrittenThis() { LclVarDsc* thisVarDsc = lvaGetDesc(info.compThisArg); // Optionally enable adjustment during stress. if (compStressCompile(STRESS_GENERIC_VARN, 15)) { thisVarDsc->lvHasILStoreOp = true; } // If this is exposed or written to, create a temp for the modifiable this if (thisVarDsc->IsAddressExposed() || thisVarDsc->lvHasILStoreOp) { // If there is a "ldarga 0" or "starg 0", grab and use the temp. lvaArg0Var = lvaGrabTemp(false DEBUGARG("Address-exposed, or written this pointer")); noway_assert(lvaArg0Var > (unsigned)info.compThisArg); LclVarDsc* arg0varDsc = lvaGetDesc(lvaArg0Var); arg0varDsc->lvType = thisVarDsc->TypeGet(); arg0varDsc->SetAddressExposed(thisVarDsc->IsAddressExposed() DEBUGARG(thisVarDsc->GetAddrExposedReason())); arg0varDsc->lvDoNotEnregister = thisVarDsc->lvDoNotEnregister; #ifdef DEBUG arg0varDsc->SetDoNotEnregReason(thisVarDsc->GetDoNotEnregReason()); #endif arg0varDsc->lvHasILStoreOp = thisVarDsc->lvHasILStoreOp; arg0varDsc->lvVerTypeInfo = thisVarDsc->lvVerTypeInfo; // Clear the TI_FLAG_THIS_PTR in the original 'this' pointer. noway_assert(arg0varDsc->lvVerTypeInfo.IsThisPtr()); thisVarDsc->lvVerTypeInfo.ClearThisPtr(); // Note that here we don't clear `m_doNotEnregReason` and it stays // `doNotEnreg` with `AddrExposed` reason. thisVarDsc->CleanAddressExposed(); thisVarDsc->lvHasILStoreOp = false; } } //------------------------------------------------------------------------ // fgObserveInlineConstants: look for operations that might get optimized // if this method were to be inlined, and report these to the inliner. // // Arguments: // opcode -- MSIL opcode under consideration // stack -- abstract stack model at this point in the IL // isInlining -- true if we're inlining (vs compiling a prejit root) // // Notes: // Currently only invoked on compare and branch opcodes. // // If we're inlining we also look at the argument values supplied by // the caller at this call site. // // The crude stack model may overestimate stack depth. 
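//
// Example (illustrative IL only): for "ldarg.0; ldc.i4.s 10; blt TARGET" the
// abstract stack holds an argument slot and a constant slot when the branch
// is seen, which produces the CALLEE_ARG_FEEDS_CONSTANT_TEST observation.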
void Compiler::fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining)
{
    // We should be able to record inline observations.
    assert(compInlineResult != nullptr);

    // The stack only has to be 1 deep for BRTRUE/FALSE
    bool lookForBranchCases = stack.IsStackAtLeastOneDeep();

    if (lookForBranchCases)
    {
        if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S || opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S)
        {
            FgStack::FgSlot slot0 = stack.GetSlot0();
            if (FgStack::IsArgument(slot0))
            {
                compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST);

                if (isInlining)
                {
                    // Check for the double whammy of an incoming constant argument
                    // feeding a constant test.
                    unsigned varNum = FgStack::SlotTypeToArgNum(slot0);
                    if (impInlineInfo->inlArgInfo[varNum].argIsInvariant)
                    {
                        compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST);
                    }
                }
            }

            return;
        }
    }

    // Remaining cases require at least two things on the stack.
    if (!stack.IsStackTwoDeep())
    {
        return;
    }

    FgStack::FgSlot slot0 = stack.GetSlot0();
    FgStack::FgSlot slot1 = stack.GetSlot1();

    // Arg feeds constant test
    if ((FgStack::IsConstant(slot0) && FgStack::IsArgument(slot1)) ||
        (FgStack::IsConstant(slot1) && FgStack::IsArgument(slot0)))
    {
        compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST);
    }

    // Arg feeds range check
    if ((FgStack::IsArrayLen(slot0) && FgStack::IsArgument(slot1)) ||
        (FgStack::IsArrayLen(slot1) && FgStack::IsArgument(slot0)))
    {
        compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK);
    }

    // Check for an incoming arg that's a constant
    if (isInlining)
    {
        if (FgStack::IsArgument(slot0))
        {
            compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_TEST);

            unsigned varNum = FgStack::SlotTypeToArgNum(slot0);
            if (impInlineInfo->inlArgInfo[varNum].argIsInvariant)
            {
                compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST);
            }
        }

        if (FgStack::IsArgument(slot1))
        {
            compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_TEST);

            unsigned varNum = FgStack::SlotTypeToArgNum(slot1);
            if (impInlineInfo->inlArgInfo[varNum].argIsInvariant)
            {
                compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST);
            }
        }
    }
}

#ifdef _PREFAST_
#pragma warning(pop)
#endif

//------------------------------------------------------------------------
// fgMarkBackwardJump: mark blocks indicating there is a jump backwards in
// IL, from a higher to lower IL offset.
// // Arguments: // targetBlock -- target of the jump // sourceBlock -- source of the jump void Compiler::fgMarkBackwardJump(BasicBlock* targetBlock, BasicBlock* sourceBlock) { noway_assert(targetBlock->bbNum <= sourceBlock->bbNum); for (BasicBlock* const block : Blocks(targetBlock, sourceBlock)) { if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && (block->bbJumpKind != BBJ_RETURN)) { block->bbFlags |= BBF_BACKWARD_JUMP; compHasBackwardJump = true; } } targetBlock->bbFlags |= BBF_BACKWARD_JUMP_TARGET; } /***************************************************************************** * * Finally link up the bbJumpDest of the blocks together */ void Compiler::fgLinkBasicBlocks() { /* Create the basic block lookup tables */ fgInitBBLookup(); /* First block is always reachable */ fgFirstBB->bbRefs = 1; /* Walk all the basic blocks, filling in the target addresses */ for (BasicBlock* const curBBdesc : Blocks()) { switch (curBBdesc->bbJumpKind) { case BBJ_COND: case BBJ_ALWAYS: case BBJ_LEAVE: curBBdesc->bbJumpDest = fgLookupBB(curBBdesc->bbJumpOffs); curBBdesc->bbJumpDest->bbRefs++; if (curBBdesc->bbJumpDest->bbNum <= curBBdesc->bbNum) { fgMarkBackwardJump(curBBdesc->bbJumpDest, curBBdesc); } /* Is the next block reachable? */ if (curBBdesc->KindIs(BBJ_ALWAYS, BBJ_LEAVE)) { break; } if (!curBBdesc->bbNext) { BADCODE("Fall thru the end of a method"); } // Fall through, the next block is also reachable FALLTHROUGH; case BBJ_NONE: curBBdesc->bbNext->bbRefs++; break; case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_THROW: case BBJ_RETURN: break; case BBJ_SWITCH: unsigned jumpCnt; jumpCnt = curBBdesc->bbJumpSwt->bbsCount; BasicBlock** jumpPtr; jumpPtr = curBBdesc->bbJumpSwt->bbsDstTab; do { *jumpPtr = fgLookupBB((unsigned)*(size_t*)jumpPtr); (*jumpPtr)->bbRefs++; if ((*jumpPtr)->bbNum <= curBBdesc->bbNum) { fgMarkBackwardJump(*jumpPtr, curBBdesc); } } while (++jumpPtr, --jumpCnt); /* Default case of CEE_SWITCH (next block), is at end of jumpTab[] */ noway_assert(*(jumpPtr - 1) == curBBdesc->bbNext); break; case BBJ_CALLFINALLY: // BBJ_CALLFINALLY and BBJ_EHCATCHRET don't appear until later case BBJ_EHCATCHRET: default: noway_assert(!"Unexpected bbJumpKind"); break; } } } //------------------------------------------------------------------------ // fgMakeBasicBlocks: walk the IL creating basic blocks, and look for // operations that might get optimized if this method were to be inlined. // // Arguments: // codeAddr -- starting address of the method's IL stream // codeSize -- length of the IL stream // jumpTarget -- [in] bit vector of jump targets found by fgFindJumpTargets // // Returns: // number of return blocks (BBJ_RETURN) in the method (may be zero) // // Notes: // Invoked for prejited and jitted methods, and for all inlinees unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget) { unsigned retBlocks = 0; const BYTE* codeBegp = codeAddr; const BYTE* codeEndp = codeAddr + codeSize; bool tailCall = false; unsigned curBBoffs = 0; BasicBlock* curBBdesc; // Keep track of where we are in the scope lists, as we will also // create blocks at scope boundaries. 
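// (Scope-boundary splitting below only happens for debuggable codegen, and
// only when variable scope info is present.)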
if (opts.compDbgCode && (info.compVarScopesCount > 0)) { compResetScopeLists(); // Ignore scopes beginning at offset 0 while (compGetNextEnterScope(0)) { /* do nothing */ } while (compGetNextExitScope(0)) { /* do nothing */ } } do { unsigned jmpAddr = DUMMY_INIT(BAD_IL_OFFSET); BasicBlockFlags bbFlags = BBF_EMPTY; BBswtDesc* swtDsc = nullptr; unsigned nxtBBoffs; OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); BBjumpKinds jmpKind = BBJ_NONE; DECODE_OPCODE: /* Get the size of additional parameters */ noway_assert((unsigned)opcode < CEE_COUNT); unsigned sz = opcodeSizes[opcode]; switch (opcode) { signed jmpDist; case CEE_PREFIX1: if (jumpTarget->bitVectTest((UINT)(codeAddr - codeBegp))) { BADCODE3("jump target between prefix 0xFE and opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } opcode = (OPCODE)(256 + getU1LittleEndian(codeAddr)); codeAddr += sizeof(__int8); goto DECODE_OPCODE; /* Check to see if we have a jump/return opcode */ case CEE_BRFALSE: case CEE_BRFALSE_S: case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BEQ: case CEE_BEQ_S: case CEE_BGE: case CEE_BGE_S: case CEE_BGE_UN: case CEE_BGE_UN_S: case CEE_BGT: case CEE_BGT_S: case CEE_BGT_UN: case CEE_BGT_UN_S: case CEE_BLE: case CEE_BLE_S: case CEE_BLE_UN: case CEE_BLE_UN_S: case CEE_BLT: case CEE_BLT_S: case CEE_BLT_UN: case CEE_BLT_UN_S: case CEE_BNE_UN: case CEE_BNE_UN_S: jmpKind = BBJ_COND; goto JMP; case CEE_LEAVE: case CEE_LEAVE_S: // We need to check if we are jumping out of a finally-protected try. jmpKind = BBJ_LEAVE; goto JMP; case CEE_BR: case CEE_BR_S: jmpKind = BBJ_ALWAYS; goto JMP; JMP: /* Compute the target address of the jump */ jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0 && (opcode == CEE_BR || opcode == CEE_BR_S)) { continue; /* NOP */ } jmpAddr = (IL_OFFSET)(codeAddr - codeBegp) + sz + jmpDist; break; case CEE_SWITCH: { unsigned jmpBase; unsigned jmpCnt; // # of switch cases (excluding default) BasicBlock** jmpTab; BasicBlock** jmpPtr; /* Allocate the switch descriptor */ swtDsc = new (this, CMK_BasicBlock) BBswtDesc; /* Read the number of entries in the table */ jmpCnt = getU4LittleEndian(codeAddr); codeAddr += 4; /* Compute the base offset for the opcode */ jmpBase = (IL_OFFSET)((codeAddr - codeBegp) + jmpCnt * sizeof(DWORD)); /* Allocate the jump table */ jmpPtr = jmpTab = new (this, CMK_BasicBlock) BasicBlock*[jmpCnt + 1]; /* Fill in the jump table */ for (unsigned count = jmpCnt; count; count--) { jmpDist = getI4LittleEndian(codeAddr); codeAddr += 4; // store the offset in the pointer. We change these in fgLinkBasicBlocks(). *jmpPtr++ = (BasicBlock*)(size_t)(jmpBase + jmpDist); } /* Append the default label to the target table */ *jmpPtr++ = (BasicBlock*)(size_t)jmpBase; /* Make sure we found the right number of labels */ noway_assert(jmpPtr == jmpTab + jmpCnt + 1); /* Compute the size of the switch opcode operands */ sz = sizeof(DWORD) + jmpCnt * sizeof(DWORD); /* Fill in the remaining fields of the switch descriptor */ swtDsc->bbsCount = jmpCnt + 1; swtDsc->bbsDstTab = jmpTab; /* This is definitely a jump */ jmpKind = BBJ_SWITCH; fgHasSwitch = true; if (opts.compProcedureSplitting) { // TODO-CQ: We might need to create a switch table; we won't know for sure until much later. // However, switch tables don't work with hot/cold splitting, currently. 
The switch table data needs // a relocation such that if the base (the first block after the prolog) and target of the switch // branch are put in different sections, the difference stored in the table is updated. However, our // relocation implementation doesn't support three different pointers (relocation address, base, and // target). So, we need to change our switch table implementation to be more like // JIT64: put the table in the code section, in the same hot/cold section as the switch jump itself // (maybe immediately after the switch jump), and make the "base" address be also in that section, // probably the address after the switch jump. opts.compProcedureSplitting = false; JITDUMP("Turning off procedure splitting for this method, as it might need switch tables; " "implementation limitation.\n"); } } goto GOT_ENDP; case CEE_ENDFILTER: bbFlags |= BBF_DONT_REMOVE; jmpKind = BBJ_EHFILTERRET; break; case CEE_ENDFINALLY: jmpKind = BBJ_EHFINALLYRET; break; case CEE_TAILCALL: if (compIsForInlining()) { // TODO-CQ: We can inline some callees with explicit tail calls if we can guarantee that the calls // can be dispatched as tail calls from the caller. compInlineResult->NoteFatal(InlineObservation::CALLEE_EXPLICIT_TAIL_PREFIX); retBlocks++; return retBlocks; } FALLTHROUGH; case CEE_READONLY: case CEE_CONSTRAINED: case CEE_VOLATILE: case CEE_UNALIGNED: // fgFindJumpTargets should have ruled out this possibility // (i.e. a prefix opcodes as last intruction in a block) noway_assert(codeAddr < codeEndp); if (jumpTarget->bitVectTest((UINT)(codeAddr - codeBegp))) { BADCODE3("jump target between prefix and an opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } break; case CEE_CALL: case CEE_CALLVIRT: case CEE_CALLI: { if (compIsForInlining() || // Ignore tail call in the inlinee. Period. (!tailCall && !compTailCallStress()) // A new BB with BBJ_RETURN would have been created // after a tailcall statement. // We need to keep this invariant if we want to stress the tailcall. // That way, the potential (tail)call statement is always the last // statement in the block. // Otherwise, we will assert at the following line in fgMorphCall() // noway_assert(fgMorphStmt->GetNextStmt() == NULL); ) { // Neither .tailcall prefix, no tailcall stress. So move on. break; } // Make sure the code sequence is legal for the tail call. // If so, mark this BB as having a BBJ_RETURN. if (codeAddr >= codeEndp - sz) { BADCODE3("No code found after the call instruction", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } if (tailCall) { // impIsTailCallILPattern uses isRecursive flag to determine whether ret in a fallthrough block is // allowed. We don't know at this point whether the call is recursive so we conservatively pass // false. This will only affect explicit tail calls when IL verification is not needed for the // method. bool isRecursive = false; if (!impIsTailCallILPattern(tailCall, opcode, codeAddr + sz, codeEndp, isRecursive)) { BADCODE3("tail call not followed by ret", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } if (fgMayExplicitTailCall()) { compTailPrefixSeen = true; } } else { OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr + sz); if (nextOpcode != CEE_RET) { noway_assert(compTailCallStress()); // Next OPCODE is not a CEE_RET, bail the attempt to stress the tailcall. // (I.e. We will not make a new BB after the "call" statement.) break; } } } /* For tail call, we just call CORINFO_HELP_TAILCALL, and it jumps to the target. 
So we don't need an epilog - just like CORINFO_HELP_THROW. Make the block BBJ_RETURN, but we will change it to BBJ_THROW if the tailness of the call is satisfied. NOTE : The next instruction is guaranteed to be a CEE_RET and it will create another BasicBlock. But there may be an jump directly to that CEE_RET. If we want to avoid creating an unnecessary block, we need to check if the CEE_RETURN is the target of a jump. */ FALLTHROUGH; case CEE_JMP: /* These are equivalent to a return from the current method But instead of directly returning to the caller we jump and execute something else in between */ case CEE_RET: retBlocks++; jmpKind = BBJ_RETURN; break; case CEE_THROW: case CEE_RETHROW: jmpKind = BBJ_THROW; break; #ifdef DEBUG // make certain we did not forget any flow of control instructions // by checking the 'ctrl' field in opcode.def. First filter out all // non-ctrl instructions #define BREAK(name) \ case name: \ break; #define NEXT(name) \ case name: \ break; #define CALL(name) #define THROW(name) #undef RETURN // undef contract RETURN macro #define RETURN(name) #define META(name) #define BRANCH(name) #define COND_BRANCH(name) #define PHI(name) #define OPDEF(name, string, pop, push, oprType, opcType, l, s1, s2, ctrl) ctrl(name) #include "opcode.def" #undef OPDEF #undef PHI #undef BREAK #undef CALL #undef NEXT #undef THROW #undef RETURN #undef META #undef BRANCH #undef COND_BRANCH // These ctrl-flow opcodes don't need any special handling case CEE_NEWOBJ: // CTRL_CALL break; // what's left are forgotten instructions default: BADCODE("Unrecognized control Opcode"); break; #else // !DEBUG default: break; #endif // !DEBUG } /* Jump over the operand */ codeAddr += sz; GOT_ENDP: tailCall = (opcode == CEE_TAILCALL); /* Make sure a jump target isn't in the middle of our opcode */ if (sz) { IL_OFFSET offs = (IL_OFFSET)(codeAddr - codeBegp) - sz; // offset of the operand for (unsigned i = 0; i < sz; i++, offs++) { if (jumpTarget->bitVectTest(offs)) { BADCODE3("jump into the middle of an opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } } } /* Compute the offset of the next opcode */ nxtBBoffs = (IL_OFFSET)(codeAddr - codeBegp); bool foundScope = false; if (opts.compDbgCode && (info.compVarScopesCount > 0)) { while (compGetNextEnterScope(nxtBBoffs)) { foundScope = true; } while (compGetNextExitScope(nxtBBoffs)) { foundScope = true; } } /* Do we have a jump? 
*/ if (jmpKind == BBJ_NONE) { /* No jump; make sure we don't fall off the end of the function */ if (codeAddr == codeEndp) { BADCODE3("missing return opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } /* If a label follows this opcode, we'll have to make a new BB */ bool makeBlock = jumpTarget->bitVectTest(nxtBBoffs); if (!makeBlock && foundScope) { makeBlock = true; #ifdef DEBUG if (verbose) { printf("Splitting at BBoffs = %04u\n", nxtBBoffs); } #endif // DEBUG } if (!makeBlock) { continue; } } /* We need to create a new basic block */ curBBdesc = fgNewBasicBlock(jmpKind); curBBdesc->bbFlags |= bbFlags; curBBdesc->bbRefs = 0; curBBdesc->bbCodeOffs = curBBoffs; curBBdesc->bbCodeOffsEnd = nxtBBoffs; switch (jmpKind) { case BBJ_SWITCH: curBBdesc->bbJumpSwt = swtDsc; break; case BBJ_COND: case BBJ_ALWAYS: case BBJ_LEAVE: noway_assert(jmpAddr != DUMMY_INIT(BAD_IL_OFFSET)); curBBdesc->bbJumpOffs = jmpAddr; break; default: break; } DBEXEC(verbose, curBBdesc->dspBlockHeader(this, false, false, false)); /* Remember where the next BB will start */ curBBoffs = nxtBBoffs; } while (codeAddr < codeEndp); noway_assert(codeAddr == codeEndp); /* Finally link up the bbJumpDest of the blocks together */ fgLinkBasicBlocks(); return retBlocks; } /***************************************************************************** * * Main entry point to discover the basic blocks for the current function. */ void Compiler::fgFindBasicBlocks() { #ifdef DEBUG if (verbose) { printf("*************** In fgFindBasicBlocks() for %s\n", info.compFullName); } // Call this here so any dump printing it inspires doesn't appear in the bb table. // fgStressBBProf(); #endif // Allocate the 'jump target' bit vector FixedBitVect* jumpTarget = FixedBitVect::bitVectInit(info.compILCodeSize + 1, this); // Walk the instrs to find all jump targets fgFindJumpTargets(info.compCode, info.compILCodeSize, jumpTarget); if (compDonotInline()) { return; } unsigned XTnum; /* Are there any exception handlers? 
*/ if (info.compXcptnsCount > 0) { noway_assert(!compIsForInlining()); /* Check and mark all the exception handlers */ for (XTnum = 0; XTnum < info.compXcptnsCount; XTnum++) { CORINFO_EH_CLAUSE clause; info.compCompHnd->getEHinfo(info.compMethodHnd, XTnum, &clause); noway_assert(clause.HandlerLength != (unsigned)-1); if (clause.TryLength <= 0) { BADCODE("try block length <=0"); } /* Mark the 'try' block extent and the handler itself */ if (clause.TryOffset > info.compILCodeSize) { BADCODE("try offset is > codesize"); } jumpTarget->bitVectSet(clause.TryOffset); if (clause.TryOffset + clause.TryLength > info.compILCodeSize) { BADCODE("try end is > codesize"); } jumpTarget->bitVectSet(clause.TryOffset + clause.TryLength); if (clause.HandlerOffset > info.compILCodeSize) { BADCODE("handler offset > codesize"); } jumpTarget->bitVectSet(clause.HandlerOffset); if (clause.HandlerOffset + clause.HandlerLength > info.compILCodeSize) { BADCODE("handler end > codesize"); } jumpTarget->bitVectSet(clause.HandlerOffset + clause.HandlerLength); if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { if (clause.FilterOffset > info.compILCodeSize) { BADCODE("filter offset > codesize"); } jumpTarget->bitVectSet(clause.FilterOffset); } } } #ifdef DEBUG if (verbose) { bool anyJumpTargets = false; printf("Jump targets:\n"); for (unsigned i = 0; i < info.compILCodeSize + 1; i++) { if (jumpTarget->bitVectTest(i)) { anyJumpTargets = true; printf(" IL_%04x\n", i); } } if (!anyJumpTargets) { printf(" none\n"); } } #endif // DEBUG /* Now create the basic blocks */ unsigned retBlocks = fgMakeBasicBlocks(info.compCode, info.compILCodeSize, jumpTarget); if (compIsForInlining()) { #ifdef DEBUG // If fgFindJumpTargets marked the call as "no return" there // really should be no BBJ_RETURN blocks in the method. bool markedNoReturn = (impInlineInfo->iciCall->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0; assert((markedNoReturn && (retBlocks == 0)) || (!markedNoReturn && (retBlocks >= 1))); #endif // DEBUG if (compInlineResult->IsFailure()) { return; } noway_assert(info.compXcptnsCount == 0); compHndBBtab = impInlineInfo->InlinerCompiler->compHndBBtab; compHndBBtabAllocCount = impInlineInfo->InlinerCompiler->compHndBBtabAllocCount; // we probably only use the table, not add to it. compHndBBtabCount = impInlineInfo->InlinerCompiler->compHndBBtabCount; info.compXcptnsCount = impInlineInfo->InlinerCompiler->info.compXcptnsCount; // Use a spill temp for the return value if there are multiple return blocks, // or if the inlinee has GC ref locals. if ((info.compRetNativeType != TYP_VOID) && ((retBlocks > 1) || impInlineInfo->HasGcRefLocals())) { // If we've spilled the ret expr to a temp we can reuse the temp // as the inlinee return spill temp. // // Todo: see if it is even better to always use this existing temp // for return values, even if we otherwise wouldn't need a return spill temp... lvaInlineeReturnSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp; if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) { // This temp should already have the type of the return value. JITDUMP("\nInliner: re-using pre-existing spill temp V%02u\n", lvaInlineeReturnSpillTemp); if (info.compRetType == TYP_REF) { // We may have co-opted an existing temp for the return spill. // We likely assumed it was single-def at the time, but now // we can see it has multiple definitions. if ((retBlocks > 1) && (lvaTable[lvaInlineeReturnSpillTemp].lvSingleDef == 1)) { // Make sure it is no longer marked single def. 
This is only safe // to do if we haven't ever updated the type. assert(!lvaTable[lvaInlineeReturnSpillTemp].lvClassInfoUpdated); JITDUMP("Marked return spill temp V%02u as NOT single def temp\n", lvaInlineeReturnSpillTemp); lvaTable[lvaInlineeReturnSpillTemp].lvSingleDef = 0; } } } else { // The lifetime of this var might expand multiple BBs. So it is a long lifetime compiler temp. lvaInlineeReturnSpillTemp = lvaGrabTemp(false DEBUGARG("Inline return value spill temp")); lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetType; // If the method returns a ref class, set the class of the spill temp // to the method's return value. We may update this later if it turns // out we can prove the method returns a more specific type. if (info.compRetType == TYP_REF) { // The return spill temp is single def only if the method has a single return block. if (retBlocks == 1) { lvaTable[lvaInlineeReturnSpillTemp].lvSingleDef = 1; JITDUMP("Marked return spill temp V%02u as a single def temp\n", lvaInlineeReturnSpillTemp); } CORINFO_CLASS_HANDLE retClassHnd = impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass; if (retClassHnd != nullptr) { lvaSetClass(lvaInlineeReturnSpillTemp, retClassHnd); } } } } return; } /* Mark all blocks within 'try' blocks as such */ if (info.compXcptnsCount == 0) { return; } if (info.compXcptnsCount > MAX_XCPTN_INDEX) { IMPL_LIMITATION("too many exception clauses"); } /* Allocate the exception handler table */ fgAllocEHTable(); /* Assume we don't need to sort the EH table (such that nested try/catch * appear before their try or handler parent). The EH verifier will notice * when we do need to sort it. */ fgNeedToSortEHTable = false; verInitEHTree(info.compXcptnsCount); EHNodeDsc* initRoot = ehnNext; // remember the original root since // it may get modified during insertion // Annotate BBs with exception handling information required for generating correct eh code // as well as checking for correct IL EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { CORINFO_EH_CLAUSE clause; info.compCompHnd->getEHinfo(info.compMethodHnd, XTnum, &clause); noway_assert(clause.HandlerLength != (unsigned)-1); // @DEPRECATED #ifdef DEBUG if (verbose) { dispIncomingEHClause(XTnum, clause); } #endif // DEBUG IL_OFFSET tryBegOff = clause.TryOffset; IL_OFFSET tryEndOff = tryBegOff + clause.TryLength; IL_OFFSET filterBegOff = 0; IL_OFFSET hndBegOff = clause.HandlerOffset; IL_OFFSET hndEndOff = hndBegOff + clause.HandlerLength; if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { filterBegOff = clause.FilterOffset; } if (tryEndOff > info.compILCodeSize) { BADCODE3("end of try block beyond end of method for try", " at offset %04X", tryBegOff); } if (hndEndOff > info.compILCodeSize) { BADCODE3("end of hnd block beyond end of method for try", " at offset %04X", tryBegOff); } HBtab->ebdTryBegOffset = tryBegOff; HBtab->ebdTryEndOffset = tryEndOff; HBtab->ebdFilterBegOffset = filterBegOff; HBtab->ebdHndBegOffset = hndBegOff; HBtab->ebdHndEndOffset = hndEndOff; /* Convert the various addresses to basic blocks */ BasicBlock* tryBegBB = fgLookupBB(tryBegOff); BasicBlock* tryEndBB = fgLookupBB(tryEndOff); // note: this can be NULL if the try region is at the end of the function BasicBlock* hndBegBB = fgLookupBB(hndBegOff); BasicBlock* hndEndBB = nullptr; BasicBlock* filtBB = nullptr; BasicBlock* block; // // Assert that the try/hnd beginning blocks are set up correctly // if (tryBegBB == nullptr) { BADCODE("Try Clause is invalid"); } if (hndBegBB == 
nullptr) { BADCODE("Handler Clause is invalid"); } #if HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION // This will change the block weight from 0 to 1 // and clear the rarely run flag hndBegBB->makeBlockHot(); #else hndBegBB->bbSetRunRarely(); // handler entry points are rarely executed #endif if (hndEndOff < info.compILCodeSize) { hndEndBB = fgLookupBB(hndEndOff); } if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { filtBB = HBtab->ebdFilter = fgLookupBB(clause.FilterOffset); filtBB->bbCatchTyp = BBCT_FILTER; hndBegBB->bbCatchTyp = BBCT_FILTER_HANDLER; #if HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION // This will change the block weight from 0 to 1 // and clear the rarely run flag filtBB->makeBlockHot(); #else filtBB->bbSetRunRarely(); // filter entry points are rarely executed #endif // Mark all BBs that belong to the filter with the XTnum of the corresponding handler for (block = filtBB; /**/; block = block->bbNext) { if (block == nullptr) { BADCODE3("Missing endfilter for filter", " at offset %04X", filtBB->bbCodeOffs); return; } // Still inside the filter block->setHndIndex(XTnum); if (block->bbJumpKind == BBJ_EHFILTERRET) { // Mark catch handler as successor. block->bbJumpDest = hndBegBB; assert(block->bbJumpDest->bbCatchTyp == BBCT_FILTER_HANDLER); break; } } if (!block->bbNext || block->bbNext != hndBegBB) { BADCODE3("Filter does not immediately precede handler for filter", " at offset %04X", filtBB->bbCodeOffs); } } else { HBtab->ebdTyp = clause.ClassToken; /* Set bbCatchTyp as appropriate */ if (clause.Flags & CORINFO_EH_CLAUSE_FINALLY) { hndBegBB->bbCatchTyp = BBCT_FINALLY; } else { if (clause.Flags & CORINFO_EH_CLAUSE_FAULT) { hndBegBB->bbCatchTyp = BBCT_FAULT; } else { hndBegBB->bbCatchTyp = clause.ClassToken; // These values should be non-zero value that will // not collide with real tokens for bbCatchTyp if (clause.ClassToken == 0) { BADCODE("Exception catch type is Null"); } noway_assert(clause.ClassToken != BBCT_FAULT); noway_assert(clause.ClassToken != BBCT_FINALLY); noway_assert(clause.ClassToken != BBCT_FILTER); noway_assert(clause.ClassToken != BBCT_FILTER_HANDLER); } } } /* Mark the initial block and last blocks in the 'try' region */ tryBegBB->bbFlags |= BBF_TRY_BEG; /* Prevent future optimizations of removing the first block */ /* of a TRY block and the first block of an exception handler */ tryBegBB->bbFlags |= BBF_DONT_REMOVE; hndBegBB->bbFlags |= BBF_DONT_REMOVE; hndBegBB->bbRefs++; // The first block of a handler gets an extra, "artificial" reference count. if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { filtBB->bbFlags |= BBF_DONT_REMOVE; filtBB->bbRefs++; // The first block of a filter gets an extra, "artificial" reference count. } tryBegBB->bbFlags |= BBF_DONT_REMOVE; hndBegBB->bbFlags |= BBF_DONT_REMOVE; // // Store the info to the table of EH block handlers // HBtab->ebdHandlerType = ToEHHandlerType(clause.Flags); HBtab->ebdTryBeg = tryBegBB; HBtab->ebdTryLast = (tryEndBB == nullptr) ? fgLastBB : tryEndBB->bbPrev; HBtab->ebdHndBeg = hndBegBB; HBtab->ebdHndLast = (hndEndBB == nullptr) ? fgLastBB : hndEndBB->bbPrev; // // Assert that all of our try/hnd blocks are setup correctly. // if (HBtab->ebdTryLast == nullptr) { BADCODE("Try Clause is invalid"); } if (HBtab->ebdHndLast == nullptr) { BADCODE("Handler Clause is invalid"); } // // Verify that it's legal // verInsertEhNode(&clause, HBtab); } // end foreach handler table entry fgSortEHTable(); // Next, set things related to nesting that depend on the sorting being complete. 
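// (fgSortEHTable above orders inner clauses before the clauses that enclose
// them, which is what lets the backward scan below fill in the enclosing
// try/handler indices.)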
for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { /* Mark all blocks in the finally/fault or catch clause */ BasicBlock* tryBegBB = HBtab->ebdTryBeg; BasicBlock* hndBegBB = HBtab->ebdHndBeg; IL_OFFSET tryBegOff = HBtab->ebdTryBegOffset; IL_OFFSET tryEndOff = HBtab->ebdTryEndOffset; IL_OFFSET hndBegOff = HBtab->ebdHndBegOffset; IL_OFFSET hndEndOff = HBtab->ebdHndEndOffset; BasicBlock* block; for (block = hndBegBB; block && (block->bbCodeOffs < hndEndOff); block = block->bbNext) { if (!block->hasHndIndex()) { block->setHndIndex(XTnum); } // All blocks in a catch handler or filter are rarely run, except the entry if ((block != hndBegBB) && (hndBegBB->bbCatchTyp != BBCT_FINALLY)) { block->bbSetRunRarely(); } } /* Mark all blocks within the covered range of the try */ for (block = tryBegBB; block && (block->bbCodeOffs < tryEndOff); block = block->bbNext) { /* Mark this BB as belonging to a 'try' block */ if (!block->hasTryIndex()) { block->setTryIndex(XTnum); } #ifdef DEBUG /* Note: the BB can't span the 'try' block */ if (!(block->bbFlags & BBF_INTERNAL)) { noway_assert(tryBegOff <= block->bbCodeOffs); noway_assert(tryEndOff >= block->bbCodeOffsEnd || tryEndOff == tryBegOff); } #endif } /* Init ebdHandlerNestingLevel of current clause, and bump up value for all * enclosed clauses (which have to be before it in the table). * Innermost try-finally blocks must precede outermost * try-finally blocks. */ #if !defined(FEATURE_EH_FUNCLETS) HBtab->ebdHandlerNestingLevel = 0; #endif // !FEATURE_EH_FUNCLETS HBtab->ebdEnclosingTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; HBtab->ebdEnclosingHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; noway_assert(XTnum < compHndBBtabCount); noway_assert(XTnum == ehGetIndex(HBtab)); for (EHblkDsc* xtab = compHndBBtab; xtab < HBtab; xtab++) { #if !defined(FEATURE_EH_FUNCLETS) if (jitIsBetween(xtab->ebdHndBegOffs(), hndBegOff, hndEndOff)) { xtab->ebdHandlerNestingLevel++; } #endif // !FEATURE_EH_FUNCLETS /* If we haven't recorded an enclosing try index for xtab then see * if this EH region should be recorded. We check if the * first offset in the xtab lies within our region. If so, * the last offset also must lie within the region, due to * nesting rules. verInsertEhNode(), below, will check for proper nesting. */ if (xtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { bool begBetween = jitIsBetween(xtab->ebdTryBegOffs(), tryBegOff, tryEndOff); if (begBetween) { // Record the enclosing scope link xtab->ebdEnclosingTryIndex = (unsigned short)XTnum; } } /* Do the same for the enclosing handler index. */ if (xtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) { bool begBetween = jitIsBetween(xtab->ebdTryBegOffs(), hndBegOff, hndEndOff); if (begBetween) { // Record the enclosing scope link xtab->ebdEnclosingHndIndex = (unsigned short)XTnum; } } } } // end foreach handler table entry #if !defined(FEATURE_EH_FUNCLETS) for (EHblkDsc* const HBtab : EHClauses(this)) { if (ehMaxHndNestingCount <= HBtab->ebdHandlerNestingLevel) ehMaxHndNestingCount = HBtab->ebdHandlerNestingLevel + 1; } #endif // !FEATURE_EH_FUNCLETS { // always run these checks for a debug build verCheckNestingLevel(initRoot); } #ifndef DEBUG // fgNormalizeEH assumes that this test has been passed. And Ssa assumes that fgNormalizeEHTable // has been run. So do this unless we're in minOpts mode (and always in debug). 
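// (In debug builds the check below always runs; in release builds it is
// skipped under MinOpts.)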
    if (!opts.MinOpts())
#endif
    {
        fgCheckBasicBlockControlFlow();
    }

#ifdef DEBUG
    if (verbose)
    {
        JITDUMP("*************** After fgFindBasicBlocks() has created the EH table\n");
        fgDispHandlerTab();
    }

    // We can't verify the handler table until all the IL legality checks have been done (above), since bad IL
    // (such as illegal nesting of regions) will trigger asserts here.
    fgVerifyHandlerTab();
#endif

    fgNormalizeEH();

    fgCheckForLoopsInHandlers();
}

//------------------------------------------------------------------------
// fgCheckForLoopsInHandlers: scan blocks to see if any handler block
//   is a backedge target.
//
// Notes:
//    Sets compHasBackwardJumpInHandler if so. This will disable
//    setting patchpoints in this method and prompt the jit to
//    optimize the method instead.
//
//    We assume any late-added handler (say for synchronized methods) will
//    not introduce any loops.
//
void Compiler::fgCheckForLoopsInHandlers()
{
    // We only care about this if we are going to set OSR patchpoints
    // and the method has exception handling.
    //
    if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0))
    {
        return;
    }

    if (JitConfig.TC_OnStackReplacement() == 0)
    {
        return;
    }

    if (info.compXcptnsCount == 0)
    {
        return;
    }

    // Walk blocks in handlers and filters, looking for a backedge target.
    //
    assert(!compHasBackwardJumpInHandler);
    for (BasicBlock* const blk : Blocks())
    {
        if (blk->hasHndIndex())
        {
            if (blk->bbFlags & BBF_BACKWARD_JUMP_TARGET)
            {
                JITDUMP("\nHandler block " FMT_BB " is backward jump target; can't have patchpoints in this method\n",
                        blk->bbNum);
                compHasBackwardJumpInHandler = true;
                break;
            }
        }
    }
}

//------------------------------------------------------------------------
// fgFixEntryFlowForOSR: add control flow path from method start to
//   the appropriate IL offset for the OSR method
//
// Notes:
//    This is simply a branch from the method entry to the OSR entry --
//    the block where the OSR method should begin execution.
//
//    If the OSR entry is within a try we will eventually need to add
//    suitable step blocks to reach the OSR entry without jumping into
//    the middle of the try. But we defer that until after importation.
//    See fgPostImportationCleanup.
//
void Compiler::fgFixEntryFlowForOSR()
{
    // Ensure the IL->BB lookup table is valid
    //
    fgInitBBLookup();

    // Remember the original entry block in case this method is tail recursive.
    //
    fgEntryBB = fgLookupBB(0);

    // Find the OSR entry block.
    //
    assert(info.compILEntry >= 0);
    BasicBlock* const osrEntry = fgLookupBB(info.compILEntry);

    // Remember the OSR entry block so we can find it again later.
    //
    fgOSREntryBB = osrEntry;

    // Now branch from method start to the right spot.
    //
    fgEnsureFirstBBisScratch();
    fgFirstBB->bbJumpKind = BBJ_ALWAYS;
    fgFirstBB->bbJumpDest = osrEntry;
    fgAddRefPred(osrEntry, fgFirstBB);

    JITDUMP("OSR: redirecting flow at entry from entry " FMT_BB " to OSR entry " FMT_BB " for the importer\n",
            fgFirstBB->bbNum, osrEntry->bbNum);
}

/*****************************************************************************
 * Check control flow constraints for well formed IL. Bail if any of the constraints
 * are violated.
*/ void Compiler::fgCheckBasicBlockControlFlow() { assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks EHblkDsc* HBtab; for (BasicBlock* const blk : Blocks()) { if (blk->bbFlags & BBF_INTERNAL) { continue; } switch (blk->bbJumpKind) { case BBJ_NONE: // block flows into the next one (no jump) fgControlFlowPermitted(blk, blk->bbNext); break; case BBJ_ALWAYS: // block does unconditional jump to target fgControlFlowPermitted(blk, blk->bbJumpDest); break; case BBJ_COND: // block conditionally jumps to the target fgControlFlowPermitted(blk, blk->bbNext); fgControlFlowPermitted(blk, blk->bbJumpDest); break; case BBJ_RETURN: // block ends with 'ret' if (blk->hasTryIndex() || blk->hasHndIndex()) { BADCODE3("Return from a protected block", ". Before offset %04X", blk->bbCodeOffsEnd); } break; case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: if (!blk->hasHndIndex()) // must be part of a handler { BADCODE3("Missing handler", ". Before offset %04X", blk->bbCodeOffsEnd); } HBtab = ehGetDsc(blk->getHndIndex()); // Endfilter allowed only in a filter block if (blk->bbJumpKind == BBJ_EHFILTERRET) { if (!HBtab->HasFilter()) { BADCODE("Unexpected endfilter"); } } // endfinally allowed only in a finally/fault block else if (!HBtab->HasFinallyOrFaultHandler()) { BADCODE("Unexpected endfinally"); } // The handler block should be the innermost block // Exception blocks are listed, innermost first. if (blk->hasTryIndex() && (blk->getTryIndex() < blk->getHndIndex())) { BADCODE("endfinally / endfilter in nested try block"); } break; case BBJ_THROW: // block ends with 'throw' /* throw is permitted from every BB, so nothing to check */ /* importer makes sure that rethrow is done from a catch */ break; case BBJ_LEAVE: // block always jumps to the target, maybe out of guarded // region. Used temporarily until importing fgControlFlowPermitted(blk, blk->bbJumpDest, true); break; case BBJ_SWITCH: // block ends with a switch statement for (BasicBlock* const bTarget : blk->SwitchTargets()) { fgControlFlowPermitted(blk, bTarget); } break; case BBJ_EHCATCHRET: // block ends with a leave out of a catch (only #if defined(FEATURE_EH_FUNCLETS)) case BBJ_CALLFINALLY: // block always calls the target finally default: noway_assert(!"Unexpected bbJumpKind"); // these blocks don't get created until importing break; } } } /**************************************************************************** * Check that the leave from the block is legal. * Consider removing this check here if we can do it cheaply during importing */ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool isLeave) { assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks unsigned srcHndBeg, destHndBeg; unsigned srcHndEnd, destHndEnd; bool srcInFilter, destInFilter; bool srcInCatch = false; EHblkDsc* srcHndTab; srcHndTab = ehInitHndRange(blkSrc, &srcHndBeg, &srcHndEnd, &srcInFilter); ehInitHndRange(blkDest, &destHndBeg, &destHndEnd, &destInFilter); /* Impose the rules for leaving or jumping from handler blocks */ if (blkSrc->hasHndIndex()) { srcInCatch = srcHndTab->HasCatchHandler() && srcHndTab->InHndRegionILRange(blkSrc); /* Are we jumping within the same handler index? */ if (BasicBlock::sameHndRegion(blkSrc, blkDest)) { /* Do we have a filter clause? */ if (srcHndTab->HasFilter()) { /* filters and catch handlers share same eh index */ /* we need to check for control flow between them. 
*/ if (srcInFilter != destInFilter) { if (!jitIsBetween(blkDest->bbCodeOffs, srcHndBeg, srcHndEnd)) { BADCODE3("Illegal control flow between filter and handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } } } else { /* The handler indexes of blkSrc and blkDest are different */ if (isLeave) { /* Any leave instructions must not enter the dest handler from outside*/ if (!jitIsBetween(srcHndBeg, destHndBeg, destHndEnd)) { BADCODE3("Illegal use of leave to enter handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } else { /* We must use a leave to exit a handler */ BADCODE3("Illegal control flow out of a handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } /* Do we have a filter clause? */ if (srcHndTab->HasFilter()) { /* It is ok to leave from the handler block of a filter, */ /* but not from the filter block of a filter */ if (srcInFilter != destInFilter) { BADCODE3("Illegal to leave a filter handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } /* We should never leave a finally handler */ if (srcHndTab->HasFinallyHandler()) { BADCODE3("Illegal to leave a finally handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } /* We should never leave a fault handler */ if (srcHndTab->HasFaultHandler()) { BADCODE3("Illegal to leave a fault handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } } else if (blkDest->hasHndIndex()) { /* blkSrc was not inside a handler, but blkDst is inside a handler */ BADCODE3("Illegal control flow into a handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } /* Are we jumping from a catch handler into the corresponding try? */ /* VB uses this for "on error goto " */ if (isLeave && srcInCatch) { // inspect all handlers containing the jump source bool bValidJumpToTry = false; // are we jumping in a valid way from a catch to the corresponding try? bool bCatchHandlerOnly = true; // false if we are jumping out of a non-catch handler EHblkDsc* ehTableEnd; EHblkDsc* ehDsc; for (ehDsc = compHndBBtab, ehTableEnd = compHndBBtab + compHndBBtabCount; bCatchHandlerOnly && ehDsc < ehTableEnd; ehDsc++) { if (ehDsc->InHndRegionILRange(blkSrc)) { if (ehDsc->HasCatchHandler()) { if (ehDsc->InTryRegionILRange(blkDest)) { // If we already considered the jump for a different try/catch, // we would have two overlapping try regions with two overlapping catch // regions, which is illegal. noway_assert(!bValidJumpToTry); // Allowed if it is the first instruction of an inner try // (and all trys in between) // // try { // .. // _tryAgain: // .. // try { // _tryNestedInner: // .. // try { // _tryNestedIllegal: // .. // } catch { // .. // } // .. // } catch { // .. // } // .. // } catch { // .. // leave _tryAgain // Allowed // .. // leave _tryNestedInner // Allowed // .. // leave _tryNestedIllegal // Not Allowed // .. // } // // Note: The leave is allowed also from catches nested inside the catch shown above. /* The common case where leave is to the corresponding try */ if (ehDsc->ebdIsSameTry(this, blkDest->getTryIndex()) || /* Also allowed is a leave to the start of a try which starts in the handler's try */ fgFlowToFirstBlockOfInnerTry(ehDsc->ebdTryBeg, blkDest, false)) { bValidJumpToTry = true; } } } else { // We are jumping from a handler which is not a catch handler. // If it's a handler, but not a catch handler, it must be either a finally or fault if (!ehDsc->HasFinallyOrFaultHandler()) { BADCODE3("Handlers must be catch, finally, or fault", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } // Are we jumping out of this handler? 
if (!ehDsc->InHndRegionILRange(blkDest)) { bCatchHandlerOnly = false; } } } else if (ehDsc->InFilterRegionILRange(blkSrc)) { // Are we jumping out of a filter? if (!ehDsc->InFilterRegionILRange(blkDest)) { bCatchHandlerOnly = false; } } } if (bCatchHandlerOnly) { if (bValidJumpToTry) { return; } else { // FALL THROUGH // This is either the case of a leave to outside the try/catch, // or a leave to a try not nested in this try/catch. // The first case is allowed, the second one will be checked // later when we check the try block rules (it is illegal if we // jump to the middle of the destination try). } } else { BADCODE3("illegal leave to exit a finally, fault or filter", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } /* Check all the try block rules */ IL_OFFSET srcTryBeg; IL_OFFSET srcTryEnd; IL_OFFSET destTryBeg; IL_OFFSET destTryEnd; ehInitTryRange(blkSrc, &srcTryBeg, &srcTryEnd); ehInitTryRange(blkDest, &destTryBeg, &destTryEnd); /* Are we jumping between try indexes? */ if (!BasicBlock::sameTryRegion(blkSrc, blkDest)) { // Are we exiting from an inner to outer try? if (jitIsBetween(srcTryBeg, destTryBeg, destTryEnd) && jitIsBetween(srcTryEnd - 1, destTryBeg, destTryEnd)) { if (!isLeave) { BADCODE3("exit from try block without a leave", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } else if (jitIsBetween(destTryBeg, srcTryBeg, srcTryEnd)) { // check that the dest Try is first instruction of an inner try if (!fgFlowToFirstBlockOfInnerTry(blkSrc, blkDest, false)) { BADCODE3("control flow into middle of try", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } else // there is no nesting relationship between src and dest { if (isLeave) { // check that the dest Try is first instruction of an inner try sibling if (!fgFlowToFirstBlockOfInnerTry(blkSrc, blkDest, true)) { BADCODE3("illegal leave into middle of try", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } else { BADCODE3("illegal control flow in to/out of try block", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } } } /***************************************************************************** * Check that blkDest is the first block of an inner try or a sibling * with no intervening trys in between */ bool Compiler::fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling) { assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks noway_assert(blkDest->hasTryIndex()); unsigned XTnum = blkDest->getTryIndex(); unsigned lastXTnum = blkSrc->hasTryIndex() ? blkSrc->getTryIndex() : compHndBBtabCount; noway_assert(XTnum < compHndBBtabCount); noway_assert(lastXTnum <= compHndBBtabCount); EHblkDsc* HBtab = ehGetDsc(XTnum); // check that we are not jumping into middle of try if (HBtab->ebdTryBeg != blkDest) { return false; } if (sibling) { noway_assert(!BasicBlock::sameTryRegion(blkSrc, blkDest)); // find the l.u.b of the two try ranges // Set lastXTnum to the l.u.b. 
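        // (l.u.b. = least upper bound: scan outward from blkSrc's try region to the first
        // entry whose try range contains blkDest; entries from that point on are not
        // checked for intervening trys below.)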
HBtab = ehGetDsc(lastXTnum); for (lastXTnum++, HBtab++; lastXTnum < compHndBBtabCount; lastXTnum++, HBtab++) { if (jitIsBetweenInclusive(blkDest->bbNum, HBtab->ebdTryBeg->bbNum, HBtab->ebdTryLast->bbNum)) { break; } } } // now check there are no intervening trys between dest and l.u.b // (it is ok to have intervening trys as long as they all start at // the same code offset) HBtab = ehGetDsc(XTnum); for (XTnum++, HBtab++; XTnum < lastXTnum; XTnum++, HBtab++) { if (HBtab->ebdTryBeg->bbNum < blkDest->bbNum && blkDest->bbNum <= HBtab->ebdTryLast->bbNum) { return false; } } return true; } /***************************************************************************** * Returns the handler nesting level of the block. * *pFinallyNesting is set to the nesting level of the inner-most * finally-protected try the block is in. */ unsigned Compiler::fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting) { unsigned curNesting = 0; // How many handlers is the block in unsigned tryFin = (unsigned)-1; // curNesting when we see innermost finally-protected try unsigned XTnum; EHblkDsc* HBtab; /* We find the block's handler nesting level by walking over the complete exception table and find enclosing clauses. */ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { noway_assert(HBtab->ebdTryBeg && HBtab->ebdHndBeg); if (HBtab->HasFinallyHandler() && (tryFin == (unsigned)-1) && bbInTryRegions(XTnum, block)) { tryFin = curNesting; } else if (bbInHandlerRegions(XTnum, block)) { curNesting++; } } if (tryFin == (unsigned)-1) { tryFin = curNesting; } if (pFinallyNesting) { *pFinallyNesting = curNesting - tryFin; } return curNesting; } //------------------------------------------------------------------------ // fgFindBlockILOffset: Given a block, find the IL offset corresponding to the first statement // in the block with a legal IL offset. Skip any leading statements that have BAD_IL_OFFSET. // If no statement has an initialized statement offset (including the case where there are // no statements in the block), then return BAD_IL_OFFSET. This function is used when // blocks are split or modified, and we want to maintain the IL offset as much as possible // to preserve good debugging behavior. // // Arguments: // block - The block to check. // // Return Value: // The first good IL offset of a statement in the block, or BAD_IL_OFFSET if such an IL offset // cannot be found. // IL_OFFSET Compiler::fgFindBlockILOffset(BasicBlock* block) { // This function searches for IL offsets in statement nodes, so it can't be used in LIR. We // could have a similar function for LIR that searches for GT_IL_OFFSET nodes. assert(!block->IsLIR()); for (Statement* const stmt : block->Statements()) { // Blocks always contain IL offsets in the root. DebugInfo di = stmt->GetDebugInfo().GetRoot(); if (di.IsValid()) { return di.GetLocation().GetOffset(); } } return BAD_IL_OFFSET; } //------------------------------------------------------------------------------ // fgSplitBlockAtEnd - split the given block into two blocks. // All code in the block stays in the original block. // Control falls through from original to new block, and // the new block is returned. //------------------------------------------------------------------------------ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) { // We'd like to use fgNewBBafter(), but we need to update the preds list before linking in the new block. // (We need the successors of 'curr' to be correct when we do this.) 
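    // Create the new block, redirect all successors of 'curr' (and their pred lists) to it,
    // then link it in after 'curr' and make 'curr' fall through to it.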
BasicBlock* newBlock = bbNewBasicBlock(curr->bbJumpKind); // Start the new block with no refs. When we set the preds below, this will get updated correctly. newBlock->bbRefs = 0; // For each successor of the original block, set the new block as their predecessor. // Note we are using the "rational" version of the successor iterator that does not hide the finallyret arcs. // Without these arcs, a block 'b' may not be a member of succs(preds(b)) if (curr->bbJumpKind != BBJ_SWITCH) { for (BasicBlock* const succ : curr->Succs(this)) { if (succ != newBlock) { JITDUMP(FMT_BB " previous predecessor was " FMT_BB ", now is " FMT_BB "\n", succ->bbNum, curr->bbNum, newBlock->bbNum); fgReplacePred(succ, curr, newBlock); } } newBlock->bbJumpDest = curr->bbJumpDest; curr->bbJumpDest = nullptr; } else { // In the case of a switch statement there's more complicated logic in order to wire up the predecessor lists // but fortunately there's an existing method that implements this functionality. newBlock->bbJumpSwt = curr->bbJumpSwt; fgChangeSwitchBlock(curr, newBlock); curr->bbJumpSwt = nullptr; } newBlock->inheritWeight(curr); // Set the new block's flags. Note that the new block isn't BBF_INTERNAL unless the old block is. newBlock->bbFlags = curr->bbFlags; // Remove flags that the new block can't have. newBlock->bbFlags &= ~(BBF_TRY_BEG | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_FUNCLET_BEG | BBF_LOOP_PREHEADER | BBF_KEEP_BBJ_ALWAYS | BBF_PATCHPOINT | BBF_BACKWARD_JUMP_TARGET | BBF_LOOP_ALIGN); // Remove the GC safe bit on the new block. It seems clear that if we split 'curr' at the end, // such that all the code is left in 'curr', and 'newBlock' just gets the control flow, then // both 'curr' and 'newBlock' could accurately retain an existing GC safe bit. However, callers // use this function to split blocks in the middle, or at the beginning, and they don't seem to // be careful about updating this flag appropriately. So, removing the GC safe bit is simply // conservative: some functions might end up being fully interruptible that could be partially // interruptible if we exercised more care here. newBlock->bbFlags &= ~BBF_GC_SAFE_POINT; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) newBlock->bbFlags &= ~(BBF_FINALLY_TARGET); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // The new block has no code, so we leave bbCodeOffs/bbCodeOffsEnd set to BAD_IL_OFFSET. If a caller // puts code in the block, then it needs to update these. // Insert the new block in the block list after the 'curr' block. fgInsertBBafter(curr, newBlock); fgExtendEHRegionAfter(curr); // The new block is in the same EH region as the old block. // Remove flags from the old block that are no longer possible. curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL); // Default to fallthru, and add the arc for that. curr->bbJumpKind = BBJ_NONE; fgAddRefPred(newBlock, curr); return newBlock; } //------------------------------------------------------------------------------ // fgSplitBlockAfterStatement - Split the given block, with all code after // the given statement going into the second block. //------------------------------------------------------------------------------ BasicBlock* Compiler::fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt) { assert(!curr->IsLIR()); // No statements in LIR, so you can't use this function. 
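    // Split at the end first; the statements following 'stmt' are then moved into the new
    // block and the IL offsets of both blocks are updated to reflect the split point.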
BasicBlock* newBlock = fgSplitBlockAtEnd(curr); if (stmt != nullptr) { newBlock->bbStmtList = stmt->GetNextStmt(); if (newBlock->bbStmtList != nullptr) { newBlock->bbStmtList->SetPrevStmt(curr->bbStmtList->GetPrevStmt()); } curr->bbStmtList->SetPrevStmt(stmt); stmt->SetNextStmt(nullptr); // Update the IL offsets of the blocks to match the split. assert(newBlock->bbCodeOffs == BAD_IL_OFFSET); assert(newBlock->bbCodeOffsEnd == BAD_IL_OFFSET); // curr->bbCodeOffs remains the same newBlock->bbCodeOffsEnd = curr->bbCodeOffsEnd; IL_OFFSET splitPointILOffset = fgFindBlockILOffset(newBlock); curr->bbCodeOffsEnd = splitPointILOffset; newBlock->bbCodeOffs = splitPointILOffset; } else { assert(curr->bbStmtList == nullptr); // if no tree was given then it better be an empty block } return newBlock; } //------------------------------------------------------------------------------ // fgSplitBlockAfterNode - Split the given block, with all code after // the given node going into the second block. // This function is only used in LIR. //------------------------------------------------------------------------------ BasicBlock* Compiler::fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node) { assert(curr->IsLIR()); BasicBlock* newBlock = fgSplitBlockAtEnd(curr); if (node != nullptr) { LIR::Range& currBBRange = LIR::AsRange(curr); if (node != currBBRange.LastNode()) { LIR::Range nodesToMove = currBBRange.Remove(node->gtNext, currBBRange.LastNode()); LIR::AsRange(newBlock).InsertAtBeginning(std::move(nodesToMove)); } // Update the IL offsets of the blocks to match the split. assert(newBlock->bbCodeOffs == BAD_IL_OFFSET); assert(newBlock->bbCodeOffsEnd == BAD_IL_OFFSET); // curr->bbCodeOffs remains the same newBlock->bbCodeOffsEnd = curr->bbCodeOffsEnd; // Search backwards from the end of the current block looking for the IL offset to use // for the end IL offset for the original block. IL_OFFSET splitPointILOffset = BAD_IL_OFFSET; LIR::Range::ReverseIterator riter; LIR::Range::ReverseIterator riterEnd; for (riter = currBBRange.rbegin(), riterEnd = currBBRange.rend(); riter != riterEnd; ++riter) { if ((*riter)->gtOper == GT_IL_OFFSET) { GenTreeILOffset* ilOffset = (*riter)->AsILOffset(); DebugInfo rootDI = ilOffset->gtStmtDI.GetRoot(); if (rootDI.IsValid()) { splitPointILOffset = rootDI.GetLocation().GetOffset(); break; } } } curr->bbCodeOffsEnd = splitPointILOffset; // Also use this as the beginning offset of the next block. Presumably we could/should // look to see if the first node is a GT_IL_OFFSET node, and use that instead. newBlock->bbCodeOffs = splitPointILOffset; } else { assert(curr->bbStmtList == nullptr); // if no node was given then it better be an empty block } return newBlock; } //------------------------------------------------------------------------------ // fgSplitBlockAtBeginning - Split the given block into two blocks. // Control falls through from original to new block, // and the new block is returned. // All code in the original block goes into the new block //------------------------------------------------------------------------------ BasicBlock* Compiler::fgSplitBlockAtBeginning(BasicBlock* curr) { BasicBlock* newBlock = fgSplitBlockAtEnd(curr); if (curr->IsLIR()) { newBlock->SetFirstLIRNode(curr->GetFirstLIRNode()); curr->SetFirstLIRNode(nullptr); } else { newBlock->bbStmtList = curr->bbStmtList; curr->bbStmtList = nullptr; } // The new block now has all the code, and the old block has none. Update the // IL offsets for the block to reflect this. 
newBlock->bbCodeOffs = curr->bbCodeOffs; newBlock->bbCodeOffsEnd = curr->bbCodeOffsEnd; curr->bbCodeOffs = BAD_IL_OFFSET; curr->bbCodeOffsEnd = BAD_IL_OFFSET; return newBlock; } //------------------------------------------------------------------------ // fgSplitEdge: Splits the edge between a block 'curr' and its successor 'succ' by creating a new block // that replaces 'succ' as a successor of 'curr', and which branches unconditionally // to (or falls through to) 'succ'. Note that for a BBJ_COND block 'curr', // 'succ' might be the fall-through path or the branch path from 'curr'. // // Arguments: // curr - A block which branches to 'succ' // succ - The target block // // Return Value: // Returns a new block, that is a successor of 'curr' and which branches unconditionally to 'succ' // // Assumptions: // 'curr' must have a bbJumpKind of BBJ_COND, BBJ_ALWAYS, or BBJ_SWITCH // // Notes: // The returned block is empty. // Can be invoked before pred lists are built. BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) { assert(curr->KindIs(BBJ_COND, BBJ_SWITCH, BBJ_ALWAYS)); if (fgComputePredsDone) { assert(fgGetPredForBlock(succ, curr) != nullptr); } BasicBlock* newBlock; if (succ == curr->bbNext) { // The successor is the fall-through path of a BBJ_COND, or // an immediately following block of a BBJ_SWITCH (which has // no fall-through path). For this case, simply insert a new // fall-through block after 'curr'. newBlock = fgNewBBafter(BBJ_NONE, curr, true /*extendRegion*/); } else { newBlock = fgNewBBinRegion(BBJ_ALWAYS, curr, curr->isRunRarely()); // The new block always jumps to 'succ' newBlock->bbJumpDest = succ; } newBlock->bbFlags |= (curr->bbFlags & succ->bbFlags & (BBF_BACKWARD_JUMP)); JITDUMP("Splitting edge from " FMT_BB " to " FMT_BB "; adding " FMT_BB "\n", curr->bbNum, succ->bbNum, newBlock->bbNum); if (curr->bbJumpKind == BBJ_COND) { fgReplacePred(succ, curr, newBlock); if (curr->bbJumpDest == succ) { // Now 'curr' jumps to newBlock curr->bbJumpDest = newBlock; } fgAddRefPred(newBlock, curr); } else if (curr->bbJumpKind == BBJ_SWITCH) { // newBlock replaces 'succ' in the switch. fgReplaceSwitchJumpTarget(curr, newBlock, succ); // And 'succ' has 'newBlock' as a new predecessor. fgAddRefPred(succ, newBlock); } else { assert(curr->bbJumpKind == BBJ_ALWAYS); fgReplacePred(succ, curr, newBlock); curr->bbJumpDest = newBlock; fgAddRefPred(newBlock, curr); } // This isn't accurate, but it is complex to compute a reasonable number so just assume that we take the // branch 50% of the time. 
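    // For a BBJ_ALWAYS source the branch is always taken, so no scaling is applied.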
// if (curr->bbJumpKind != BBJ_ALWAYS) { newBlock->inheritWeightPercentage(curr, 50); } // The bbLiveIn and bbLiveOut are both equal to the bbLiveIn of 'succ' if (fgLocalVarLivenessDone) { VarSetOps::Assign(this, newBlock->bbLiveIn, succ->bbLiveIn); VarSetOps::Assign(this, newBlock->bbLiveOut, succ->bbLiveIn); } return newBlock; } // Removes the block from the bbPrev/bbNext chain // Updates fgFirstBB and fgLastBB if necessary // Does not update fgFirstFuncletBB or fgFirstColdBlock (fgUnlinkRange does) void Compiler::fgUnlinkBlock(BasicBlock* block) { if (block->bbPrev) { block->bbPrev->bbNext = block->bbNext; if (block->bbNext) { block->bbNext->bbPrev = block->bbPrev; } else { fgLastBB = block->bbPrev; } } else { assert(block == fgFirstBB); assert(block != fgLastBB); assert((fgFirstBBScratch == nullptr) || (fgFirstBBScratch == fgFirstBB)); fgFirstBB = block->bbNext; fgFirstBB->bbPrev = nullptr; if (fgFirstBBScratch != nullptr) { #ifdef DEBUG // We had created an initial scratch BB, but now we're deleting it. if (verbose) { printf("Unlinking scratch " FMT_BB "\n", block->bbNum); } #endif // DEBUG fgFirstBBScratch = nullptr; } } } /***************************************************************************************************** * * Function called to unlink basic block range [bBeg .. bEnd] from the basic block list. * * 'bBeg' can't be the first block. */ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) { assert(bBeg != nullptr); assert(bEnd != nullptr); BasicBlock* bPrev = bBeg->bbPrev; assert(bPrev != nullptr); // Can't unlink a range starting with the first block bPrev->setNext(bEnd->bbNext); /* If we removed the last block in the method then update fgLastBB */ if (fgLastBB == bEnd) { fgLastBB = bPrev; noway_assert(fgLastBB->bbNext == nullptr); } // If bEnd was the first Cold basic block update fgFirstColdBlock if (fgFirstColdBlock == bEnd) { fgFirstColdBlock = bPrev->bbNext; } #if defined(FEATURE_EH_FUNCLETS) #ifdef DEBUG // You can't unlink a range that includes the first funclet block. A range certainly // can't cross the non-funclet/funclet region. And you can't unlink the first block // of the first funclet with this, either. (If that's necessary, it could be allowed // by updating fgFirstFuncletBB to bEnd->bbNext.) for (BasicBlock* tempBB = bBeg; tempBB != bEnd->bbNext; tempBB = tempBB->bbNext) { assert(tempBB != fgFirstFuncletBB); } #endif // DEBUG #endif // FEATURE_EH_FUNCLETS } /***************************************************************************************************** * * Function called to remove a basic block */ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) { /* The block has to be either unreachable or empty */ PREFIX_ASSUME(block != nullptr); BasicBlock* bPrev = block->bbPrev; JITDUMP("fgRemoveBlock " FMT_BB ", unreachable=%s\n", block->bbNum, dspBool(unreachable)); // If we've cached any mappings from switch blocks to SwitchDesc's (which contain only the // *unique* successors of the switch block), invalidate that cache, since an entry in one of // the SwitchDescs might be removed. InvalidateUniqueSwitchSuccMap(); noway_assert((block == fgFirstBB) || (bPrev && (bPrev->bbNext == block))); noway_assert(!(block->bbFlags & BBF_DONT_REMOVE)); // Should never remove a genReturnBB, as we might have special hookups there. 
noway_assert(block != genReturnBB); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't remove a finally target assert(!(block->bbFlags & BBF_FINALLY_TARGET)); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (unreachable) { PREFIX_ASSUME(bPrev != nullptr); fgUnreachableBlock(block); #if defined(FEATURE_EH_FUNCLETS) // If block was the fgFirstFuncletBB then set fgFirstFuncletBB to block->bbNext if (block == fgFirstFuncletBB) { fgFirstFuncletBB = block->bbNext; } #endif // FEATURE_EH_FUNCLETS if (bPrev->bbJumpKind == BBJ_CALLFINALLY) { // bPrev CALL becomes RETLESS as the BBJ_ALWAYS block is unreachable bPrev->bbFlags |= BBF_RETLESS_CALL; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) NO_WAY("No retless call finally blocks; need unwind target instead"); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else if (bPrev->bbJumpKind == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext && !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && (block->bbNext != fgFirstColdBlock)) { // previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE. // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), // because that would violate our invariant that BBJ_CALLFINALLY blocks are followed by // BBJ_ALWAYS blocks. bPrev->bbJumpKind = BBJ_NONE; } // If this is the first Cold basic block update fgFirstColdBlock if (block == fgFirstColdBlock) { fgFirstColdBlock = block->bbNext; } /* Unlink this block from the bbNext chain */ fgUnlinkBlock(block); /* At this point the bbPreds and bbRefs had better be zero */ noway_assert((block->bbRefs == 0) && (block->bbPreds == nullptr)); /* A BBJ_CALLFINALLY is usually paired with a BBJ_ALWAYS. * If we delete such a BBJ_CALLFINALLY we also delete the BBJ_ALWAYS */ if (block->isBBCallAlwaysPair()) { BasicBlock* leaveBlk = block->bbNext; noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; leaveBlk->bbRefs = 0; leaveBlk->bbPreds = nullptr; fgRemoveBlock(leaveBlk, /* unreachable */ true); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) fgClearFinallyTargetBit(leaveBlk->bbJumpDest); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else if (block->bbJumpKind == BBJ_RETURN) { fgRemoveReturnBlock(block); } } else // block is empty { noway_assert(block->isEmpty()); // The block cannot follow a non-retless BBJ_CALLFINALLY (because we don't know who may jump to it). noway_assert(!block->isBBCallAlwaysPairTail()); /* This cannot be the last basic block */ noway_assert(block != fgLastBB); #ifdef DEBUG if (verbose) { printf("Removing empty " FMT_BB "\n", block->bbNum); } #endif // DEBUG #ifdef DEBUG /* Some extra checks for the empty case */ switch (block->bbJumpKind) { case BBJ_NONE: break; case BBJ_ALWAYS: /* Do not remove a block that jumps to itself - used for while (true){} */ noway_assert(block->bbJumpDest != block); /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ noway_assert(bPrev && bPrev->bbJumpKind == BBJ_NONE); break; default: noway_assert(!"Empty block of this type cannot be removed!"); break; } #endif // DEBUG noway_assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS)); /* Who is the "real" successor of this block? 
*/ BasicBlock* succBlock; if (block->bbJumpKind == BBJ_ALWAYS) { succBlock = block->bbJumpDest; } else { succBlock = block->bbNext; } bool skipUnmarkLoop = false; // If block is the backedge for a loop and succBlock precedes block // then the succBlock becomes the new LOOP HEAD // NOTE: there's an assumption here that the blocks are numbered in increasing bbNext order. // NOTE 2: if fgDomsComputed is false, then we can't check reachability. However, if this is // the case, then the loop structures probably are also invalid, and shouldn't be used. This // can be the case late in compilation (such as Lower), where remnants of earlier created // structures exist, but haven't been maintained. if (block->isLoopHead() && (succBlock->bbNum <= block->bbNum)) { succBlock->bbFlags |= BBF_LOOP_HEAD; if (block->isLoopAlign()) { loopAlignCandidates++; succBlock->bbFlags |= BBF_LOOP_ALIGN; JITDUMP("Propagating LOOP_ALIGN flag from " FMT_BB " to " FMT_BB " for " FMT_LP "\n ", block->bbNum, succBlock->bbNum, block->bbNatLoopNum); } if (fgDomsComputed && fgReachable(succBlock, block)) { // Mark all the reachable blocks between 'succBlock' and 'bPrev' optScaleLoopBlocks(succBlock, bPrev); } } else if (succBlock->isLoopHead() && bPrev && (succBlock->bbNum <= bPrev->bbNum)) { skipUnmarkLoop = true; } // If this is the first Cold basic block update fgFirstColdBlock if (block == fgFirstColdBlock) { fgFirstColdBlock = block->bbNext; } #if defined(FEATURE_EH_FUNCLETS) // Update fgFirstFuncletBB if necessary if (block == fgFirstFuncletBB) { fgFirstFuncletBB = block->bbNext; } #endif // FEATURE_EH_FUNCLETS /* First update the loop table and bbWeights */ optUpdateLoopsBeforeRemoveBlock(block, skipUnmarkLoop); // Update successor block start IL offset, if empty predecessor // covers the immediately preceding range. if ((block->bbCodeOffsEnd == succBlock->bbCodeOffs) && (block->bbCodeOffs != BAD_IL_OFFSET)) { assert(block->bbCodeOffs <= succBlock->bbCodeOffs); succBlock->bbCodeOffs = block->bbCodeOffs; } /* Remove the block */ if (bPrev == nullptr) { /* special case if this is the first BB */ noway_assert(block == fgFirstBB); /* Must be a fall through to next block */ noway_assert(block->bbJumpKind == BBJ_NONE); /* old block no longer gets the extra ref count for being the first block */ block->bbRefs--; succBlock->bbRefs++; } /* Update bbRefs and bbPreds. * All blocks jumping to 'block' now jump to 'succBlock'. * First, remove 'block' from the predecessor list of succBlock. */ fgRemoveRefPred(succBlock, block); for (flowList* const pred : block->PredEdges()) { BasicBlock* predBlock = pred->getBlock(); /* Are we changing a loop backedge into a forward jump? */ if (block->isLoopHead() && (predBlock->bbNum >= block->bbNum) && (predBlock->bbNum <= succBlock->bbNum)) { /* First update the loop table and bbWeights */ optUpdateLoopsBeforeRemoveBlock(predBlock); } /* If predBlock is a new predecessor, then add it to succBlock's predecessor's list. 
*/ if (predBlock->bbJumpKind != BBJ_SWITCH) { // Even if the pred is not a switch, we could have a conditional branch // to the fallthrough, so duplicate there could be preds for (unsigned i = 0; i < pred->flDupCount; i++) { fgAddRefPred(succBlock, predBlock); } } /* change all jumps to the removed block */ switch (predBlock->bbJumpKind) { default: noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()"); break; case BBJ_NONE: noway_assert(predBlock == bPrev); PREFIX_ASSUME(bPrev != nullptr); /* In the case of BBJ_ALWAYS we have to change the type of its predecessor */ if (block->bbJumpKind == BBJ_ALWAYS) { /* bPrev now becomes a BBJ_ALWAYS */ bPrev->bbJumpKind = BBJ_ALWAYS; bPrev->bbJumpDest = succBlock; } break; case BBJ_COND: /* The links for the direct predecessor case have already been updated above */ if (predBlock->bbJumpDest != block) { break; } /* Check if both side of the BBJ_COND now jump to the same block */ if (predBlock->bbNext == succBlock) { // Make sure we are replacing "block" with "succBlock" in predBlock->bbJumpDest. noway_assert(predBlock->bbJumpDest == block); predBlock->bbJumpDest = succBlock; fgRemoveConditionalJump(predBlock); break; } /* Fall through for the jump case */ FALLTHROUGH; case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: noway_assert(predBlock->bbJumpDest == block); predBlock->bbJumpDest = succBlock; break; case BBJ_SWITCH: // Change any jumps from 'predBlock' (a BBJ_SWITCH) to 'block' to jump to 'succBlock' // // For the jump targets of 'predBlock' (a BBJ_SWITCH) that jump to 'block' // remove the old predecessor at 'block' from 'predBlock' and // add the new predecessor at 'succBlock' from 'predBlock' // fgReplaceSwitchJumpTarget(predBlock, succBlock, block); break; } } fgUnlinkBlock(block); block->bbFlags |= BBF_REMOVED; } // If this was marked for alignment, remove it block->unmarkLoopAlign(this DEBUG_ARG("Removed block")); if (bPrev != nullptr) { switch (bPrev->bbJumpKind) { case BBJ_CALLFINALLY: // If prev is a BBJ_CALLFINALLY it better be marked as RETLESS noway_assert(bPrev->bbFlags & BBF_RETLESS_CALL); break; case BBJ_ALWAYS: // Check for branch to next block. Just make sure the BBJ_ALWAYS block is not // part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. We do this here and don't rely on fgUpdateFlowGraph // because we can be called by ComputeDominators and it expects it to remove this jump to // the next block. This is the safest fix. We should remove all this BBJ_CALLFINALLY/BBJ_ALWAYS // pairing. 
if ((bPrev->bbJumpDest == bPrev->bbNext) && !fgInDifferentRegions(bPrev, bPrev->bbJumpDest)) // We don't remove a branch from Hot -> Cold { if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail()) { // It's safe to change the jump type bPrev->bbJumpKind = BBJ_NONE; } } break; case BBJ_COND: /* Check for branch to next block */ if (bPrev->bbJumpDest == bPrev->bbNext) { fgRemoveConditionalJump(bPrev); } break; default: break; } ehUpdateForDeletedBlock(block); } } /***************************************************************************** * * Function called to connect to block that previously had a fall through */ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) { BasicBlock* jmpBlk = nullptr; /* If bSrc is non-NULL */ if (bSrc != nullptr) { /* If bSrc falls through to a block that is not bDst, we will insert a jump to bDst */ if (bSrc->bbFallsThrough() && (bSrc->bbNext != bDst)) { switch (bSrc->bbJumpKind) { case BBJ_NONE: bSrc->bbJumpKind = BBJ_ALWAYS; bSrc->bbJumpDest = bDst; #ifdef DEBUG if (verbose) { printf("Block " FMT_BB " ended with a BBJ_NONE, Changed to an unconditional jump to " FMT_BB "\n", bSrc->bbNum, bSrc->bbJumpDest->bbNum); } #endif break; case BBJ_CALLFINALLY: case BBJ_COND: // Add a new block after bSrc which jumps to 'bDst' jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true); if (fgComputePredsDone) { fgAddRefPred(jmpBlk, bSrc, fgGetPredForBlock(bDst, bSrc)); } // Record the loop number in the new block jmpBlk->bbNatLoopNum = bSrc->bbNatLoopNum; // When adding a new jmpBlk we will set the bbWeight and bbFlags // if (fgHaveValidEdgeWeights && fgHaveProfileData()) { noway_assert(fgComputePredsDone); flowList* newEdge = fgGetPredForBlock(jmpBlk, bSrc); jmpBlk->bbWeight = (newEdge->edgeWeightMin() + newEdge->edgeWeightMax()) / 2; if (bSrc->bbWeight == BB_ZERO_WEIGHT) { jmpBlk->bbWeight = BB_ZERO_WEIGHT; } if (jmpBlk->bbWeight == BB_ZERO_WEIGHT) { jmpBlk->bbFlags |= BBF_RUN_RARELY; } weight_t weightDiff = (newEdge->edgeWeightMax() - newEdge->edgeWeightMin()); weight_t slop = BasicBlock::GetSlopFraction(bSrc, bDst); // // If the [min/max] values for our edge weight is within the slop factor // then we will set the BBF_PROF_WEIGHT flag for the block // if (weightDiff <= slop) { jmpBlk->bbFlags |= BBF_PROF_WEIGHT; } } else { // We set the bbWeight to the smaller of bSrc->bbWeight or bDst->bbWeight if (bSrc->bbWeight < bDst->bbWeight) { jmpBlk->bbWeight = bSrc->bbWeight; jmpBlk->bbFlags |= (bSrc->bbFlags & BBF_RUN_RARELY); } else { jmpBlk->bbWeight = bDst->bbWeight; jmpBlk->bbFlags |= (bDst->bbFlags & BBF_RUN_RARELY); } } jmpBlk->bbJumpDest = bDst; if (fgComputePredsDone) { fgReplacePred(bDst, bSrc, jmpBlk); } else { jmpBlk->bbFlags |= BBF_IMPORTED; } #ifdef DEBUG if (verbose) { printf("Added an unconditional jump to " FMT_BB " after block " FMT_BB "\n", jmpBlk->bbJumpDest->bbNum, bSrc->bbNum); } #endif // DEBUG break; default: noway_assert(!"Unexpected bbJumpKind"); break; } } else { // If bSrc is an unconditional branch to the next block // then change it to a BBJ_NONE block // if ((bSrc->bbJumpKind == BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (bSrc->bbJumpDest == bSrc->bbNext)) { bSrc->bbJumpKind = BBJ_NONE; #ifdef DEBUG if (verbose) { printf("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB " into a BBJ_NONE block\n", bSrc->bbNum, bSrc->bbNext->bbNum); } #endif // DEBUG } } } return jmpBlk; } //------------------------------------------------------------------------ // fgRenumberBlocks: update block bbNums 
to reflect bbNext order // // Returns: // true if blocks were renumbered or maxBBNum was updated. // // Notes: // Walk the flow graph, reassign block numbers to keep them in ascending order. // Return 'true' if any renumbering was actually done, OR if we change the // maximum number of assigned basic blocks (this can happen if we do inlining, // create a new, high-numbered block, then that block goes away. We go to // renumber the blocks, none of them actually change number, but we shrink the // maximum assigned block number. This affects the block set epoch). // // As a consequence of renumbering, block pred lists may need to be reordered. // bool Compiler::fgRenumberBlocks() { // If we renumber the blocks the dominator information will be out-of-date if (fgDomsComputed) { noway_assert(!"Can't call Compiler::fgRenumberBlocks() when fgDomsComputed==true"); } #ifdef DEBUG if (verbose) { printf("\n*************** Before renumbering the basic blocks\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool renumbered = false; bool newMaxBBNum = false; BasicBlock* block; unsigned numStart = 1 + (compIsForInlining() ? impInlineInfo->InlinerCompiler->fgBBNumMax : 0); unsigned num; for (block = fgFirstBB, num = numStart; block != nullptr; block = block->bbNext, num++) { noway_assert((block->bbFlags & BBF_REMOVED) == 0); if (block->bbNum != num) { renumbered = true; #ifdef DEBUG if (verbose) { printf("Renumber " FMT_BB " to " FMT_BB "\n", block->bbNum, num); } #endif // DEBUG block->bbNum = num; } if (block->bbNext == nullptr) { fgLastBB = block; fgBBcount = num - numStart + 1; if (compIsForInlining()) { if (impInlineInfo->InlinerCompiler->fgBBNumMax != num) { impInlineInfo->InlinerCompiler->fgBBNumMax = num; newMaxBBNum = true; } } else { if (fgBBNumMax != num) { fgBBNumMax = num; newMaxBBNum = true; } } } } // If we renumbered, then we may need to reorder some pred lists. // if (renumbered && fgComputePredsDone) { for (BasicBlock* const block : Blocks()) { block->ensurePredListOrder(this); } } #ifdef DEBUG if (verbose) { printf("\n*************** After renumbering the basic blocks\n"); if (renumbered) { fgDispBasicBlocks(); fgDispHandlerTab(); } else { printf("=============== No blocks renumbered!\n"); } } #endif // DEBUG // Now update the BlockSet epoch, which depends on the block numbers. // If any blocks have been renumbered then create a new BlockSet epoch. // Even if we have not renumbered any blocks, we might still need to force // a new BlockSet epoch, for one of several reasons. If there are any new // blocks with higher numbers than the former maximum numbered block, then we // need a new epoch with a new size matching the new largest numbered block. // Also, if the number of blocks is different from the last time we set the // BlockSet epoch, then we need a new epoch. This wouldn't happen if we // renumbered blocks after every block addition/deletion, but it might be // the case that we can change the number of blocks, then set the BlockSet // epoch without renumbering, then change the number of blocks again, then // renumber. if (renumbered || newMaxBBNum) { NewBasicBlockEpoch(); // The key in the unique switch successor map is dependent on the block number, so invalidate that cache. InvalidateUniqueSwitchSuccMap(); } else { EnsureBasicBlockEpoch(); } // Tell our caller if any blocks actually were renumbered. return renumbered || newMaxBBNum; } /***************************************************************************** * * Is the BasicBlock bJump a forward branch? 
* Optionally bSrc can be supplied to indicate that * bJump must be forward with respect to bSrc */ bool Compiler::fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc /* = NULL */) { bool result = false; if (bJump->KindIs(BBJ_COND, BBJ_ALWAYS)) { BasicBlock* bDest = bJump->bbJumpDest; BasicBlock* bTemp = (bSrc == nullptr) ? bJump : bSrc; while (true) { bTemp = bTemp->bbNext; if (bTemp == nullptr) { break; } if (bTemp == bDest) { result = true; break; } } } return result; } /***************************************************************************** * * Returns true if it is allowable (based upon the EH regions) * to place block bAfter immediately after bBefore. It is allowable * if the 'bBefore' and 'bAfter' blocks are in the exact same EH region. */ bool Compiler::fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter) { return BasicBlock::sameEHRegion(bBefore, bAfter); } /***************************************************************************** * * Function called to move the range of blocks [bStart .. bEnd]. * The blocks are placed immediately after the insertAfterBlk. * fgFirstFuncletBB is not updated; that is the responsibility of the caller, if necessary. */ void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk) { /* We have decided to insert the block(s) after 'insertAfterBlk' */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("Relocated block%s [" FMT_BB ".." FMT_BB "] inserted after " FMT_BB "%s\n", (bStart == bEnd) ? "" : "s", bStart->bbNum, bEnd->bbNum, insertAfterBlk->bbNum, (insertAfterBlk->bbNext == nullptr) ? " at the end of method" : ""); } #endif // DEBUG /* relink [bStart .. bEnd] into the flow graph */ bEnd->bbNext = insertAfterBlk->bbNext; if (insertAfterBlk->bbNext) { insertAfterBlk->bbNext->bbPrev = bEnd; } insertAfterBlk->setNext(bStart); /* If insertAfterBlk was fgLastBB then update fgLastBB */ if (insertAfterBlk == fgLastBB) { fgLastBB = bEnd; noway_assert(fgLastBB->bbNext == nullptr); } } /***************************************************************************** * * Function called to relocate a single range to the end of the method. * Only an entire consecutive region can be moved and it will be kept together. * Except for the first block, the range cannot have any blocks that jump into or out of the region. * When successful we return the bLast block which is the last block that we relocated. * When unsuccessful we return NULL. ============================================================= NOTE: This function can invalidate all pointers into the EH table, as well as change the size of the EH table! ============================================================= */ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType) { INDEBUG(const char* reason = "None";) // Figure out the range of blocks we're going to move unsigned XTnum; EHblkDsc* HBtab; BasicBlock* bStart = nullptr; BasicBlock* bMiddle = nullptr; BasicBlock* bLast = nullptr; BasicBlock* bPrev = nullptr; #if defined(FEATURE_EH_FUNCLETS) // We don't support moving try regions... yet? noway_assert(relocateType == FG_RELOCATE_HANDLER); #endif // FEATURE_EH_FUNCLETS HBtab = ehGetDsc(regionIndex); if (relocateType == FG_RELOCATE_TRY) { bStart = HBtab->ebdTryBeg; bLast = HBtab->ebdTryLast; } else if (relocateType == FG_RELOCATE_HANDLER) { if (HBtab->HasFilter()) { // The filter and handler funclets must be moved together, and remain contiguous. 
bStart = HBtab->ebdFilter; bMiddle = HBtab->ebdHndBeg; bLast = HBtab->ebdHndLast; } else { bStart = HBtab->ebdHndBeg; bLast = HBtab->ebdHndLast; } } // Our range must contain either all rarely run blocks or all non-rarely run blocks bool inTheRange = false; bool validRange = false; BasicBlock* block; noway_assert(bStart != nullptr && bLast != nullptr); if (bStart == fgFirstBB) { INDEBUG(reason = "can not relocate first block";) goto FAILURE; } #if !defined(FEATURE_EH_FUNCLETS) // In the funclets case, we still need to set some information on the handler blocks if (bLast->bbNext == NULL) { INDEBUG(reason = "region is already at the end of the method";) goto FAILURE; } #endif // !FEATURE_EH_FUNCLETS // Walk the block list for this purpose: // 1. Verify that all the blocks in the range are either all rarely run or not rarely run. // When creating funclets, we ignore the run rarely flag, as we need to be able to move any blocks // in the range. CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(FEATURE_EH_FUNCLETS) bool isRare; isRare = bStart->isRunRarely(); #endif // !FEATURE_EH_FUNCLETS block = fgFirstBB; while (true) { if (block == bStart) { noway_assert(inTheRange == false); inTheRange = true; } else if (block == bLast->bbNext) { noway_assert(inTheRange == true); inTheRange = false; break; // we found the end, so we're done } if (inTheRange) { #if !defined(FEATURE_EH_FUNCLETS) // Unless all blocks are (not) run rarely we must return false. if (isRare != block->isRunRarely()) { INDEBUG(reason = "this region contains both rarely run and non-rarely run blocks";) goto FAILURE; } #endif // !FEATURE_EH_FUNCLETS validRange = true; } if (block == nullptr) { break; } block = block->bbNext; } // Ensure that bStart .. bLast defined a valid range noway_assert((validRange == true) && (inTheRange == false)); bPrev = bStart->bbPrev; noway_assert(bPrev != nullptr); // Can't move a range that includes the first block of the function. JITDUMP("Relocating %s range " FMT_BB ".." FMT_BB " (EH#%u) to end of BBlist\n", (relocateType == FG_RELOCATE_TRY) ? "try" : "handler", bStart->bbNum, bLast->bbNum, regionIndex); #ifdef DEBUG if (verbose) { fgDispBasicBlocks(); fgDispHandlerTab(); } #if !defined(FEATURE_EH_FUNCLETS) // This is really expensive, and quickly becomes O(n^n) with funclets // so only do it once after we've created them (see fgCreateFunclets) if (expensiveDebugCheckLevel >= 2) { fgDebugCheckBBlist(); } #endif #endif // DEBUG #if defined(FEATURE_EH_FUNCLETS) bStart->bbFlags |= BBF_FUNCLET_BEG; // Mark the start block of the funclet if (bMiddle != nullptr) { bMiddle->bbFlags |= BBF_FUNCLET_BEG; // Also mark the start block of a filter handler as a funclet } #endif // FEATURE_EH_FUNCLETS BasicBlock* bNext; bNext = bLast->bbNext; /* Temporarily unlink [bStart .. bLast] from the flow graph */ fgUnlinkRange(bStart, bLast); BasicBlock* insertAfterBlk; insertAfterBlk = fgLastBB; #if defined(FEATURE_EH_FUNCLETS) // There are several cases we need to consider when moving an EH range. // If moving a range X, we must consider its relationship to every other EH // range A in the table. Note that each entry in the table represents both // a protected region and a handler region (possibly including a filter region // that must live before and adjacent to the handler region), so we must // consider try and handler regions independently. These are the cases: // 1. 
A is completely contained within X (where "completely contained" means
    //    that the 'begin' and 'last' parts of A are strictly between the 'begin'
    //    and 'end' parts of X, and aren't equal to either, for example, they don't
    //    share 'last' blocks). In this case, when we move X, A moves with it, and
    //    the EH table doesn't need to change.
    // 2. X is completely contained within A. In this case, X gets extracted from A,
    //    and the range of A shrinks, but because X is strictly within A, the EH
    //    table doesn't need to change.
    // 3. A and X have exactly the same range. In this case, A is moving with X and
    //    the EH table doesn't need to change.
    // 4. A and X share the 'last' block. There are two sub-cases:
    //    (a) A is a larger range than X (such that the beginning of A precedes the
    //        beginning of X): in this case, we are moving the tail of A. We set the
    //        'last' block of A to the block preceding the beginning block of X.
    //    (b) A is a smaller range than X. Thus, we are moving the entirety of A along
    //        with X. In this case, nothing in the EH record for A needs to change.
    // 5. A and X share the 'beginning' block (but aren't the same range, as in #3).
    //    This can never happen here, because we are only moving handler ranges (we don't
    //    move try ranges), and handler regions cannot start at the beginning of a try
    //    range or handler range and be a subset.
    //
    // Note that A and X must properly nest for the table to be well-formed. For example,
    // the beginning of A can't be strictly within the range of X (that is, the beginning
    // of A isn't shared with the beginning of X) and the end of A outside the range.

    for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
    {
        if (XTnum != regionIndex) // we don't need to update our 'last' pointer
        {
            if (HBtab->ebdTryLast == bLast)
            {
                // If we moved a set of blocks that were at the end of
                // a different try region then we may need to update ebdTryLast
                for (block = HBtab->ebdTryBeg; block != nullptr; block = block->bbNext)
                {
                    if (block == bPrev)
                    {
                        // We were contained within it, so shrink its region by
                        // setting its 'last'
                        fgSetTryEnd(HBtab, bPrev);
                        break;
                    }
                    else if (block == HBtab->ebdTryLast->bbNext)
                    {
                        // bPrev does not come after the TryBeg, thus we are larger, and
                        // it is moving with us.
                        break;
                    }
                }
            }

            if (HBtab->ebdHndLast == bLast)
            {
                // If we moved a set of blocks that were at the end of
                // a different handler region then we must update ebdHndLast
                for (block = HBtab->ebdHndBeg; block != nullptr; block = block->bbNext)
                {
                    if (block == bPrev)
                    {
                        fgSetHndEnd(HBtab, bPrev);
                        break;
                    }
                    else if (block == HBtab->ebdHndLast->bbNext)
                    {
                        // bPrev does not come after the HndBeg
                        break;
                    }
                }
            }
        }
    } // end exception table iteration

    // Insert the block(s) we are moving after fgLastBlock
    fgMoveBlocksAfter(bStart, bLast, insertAfterBlk);

    if (fgFirstFuncletBB == nullptr) // The funclet region isn't set yet
    {
        fgFirstFuncletBB = bStart;
    }
    else
    {
        assert(fgFirstFuncletBB != insertAfterBlk->bbNext); // We insert at the end, not at the beginning, of the funclet region.
    }

    // These asserts assume we aren't moving try regions (which we might need to do). Only
    // try regions can have fall through into or out of the region.
noway_assert(!bPrev->bbFallsThrough()); // There can be no fall through into a filter or handler region noway_assert(!bLast->bbFallsThrough()); // There can be no fall through out of a handler region #ifdef DEBUG if (verbose) { printf("Create funclets: moved region\n"); fgDispHandlerTab(); } // We have to wait to do this until we've created all the additional regions // Because this relies on ebdEnclosingTryIndex and ebdEnclosingHndIndex #endif // DEBUG #else // !FEATURE_EH_FUNCLETS for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { if (XTnum == regionIndex) { // Don't update our handler's Last info continue; } if (HBtab->ebdTryLast == bLast) { // If we moved a set of blocks that were at the end of // a different try region then we may need to update ebdTryLast for (block = HBtab->ebdTryBeg; block != NULL; block = block->bbNext) { if (block == bPrev) { fgSetTryEnd(HBtab, bPrev); break; } else if (block == HBtab->ebdTryLast->bbNext) { // bPrev does not come after the TryBeg break; } } } if (HBtab->ebdHndLast == bLast) { // If we moved a set of blocks that were at the end of // a different handler region then we must update ebdHndLast for (block = HBtab->ebdHndBeg; block != NULL; block = block->bbNext) { if (block == bPrev) { fgSetHndEnd(HBtab, bPrev); break; } else if (block == HBtab->ebdHndLast->bbNext) { // bPrev does not come after the HndBeg break; } } } } // end exception table iteration // We have decided to insert the block(s) after fgLastBlock fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); // If bPrev falls through, we will insert a jump to block fgConnectFallThrough(bPrev, bStart); // If bLast falls through, we will insert a jump to bNext fgConnectFallThrough(bLast, bNext); #endif // !FEATURE_EH_FUNCLETS goto DONE; FAILURE: #ifdef DEBUG if (verbose) { printf("*************** Failed fgRelocateEHRange(" FMT_BB ".." FMT_BB ") because %s\n", bStart->bbNum, bLast->bbNum, reason); } #endif // DEBUG bLast = nullptr; DONE: return bLast; } //------------------------------------------------------------------------ // fgMightHaveLoop: return true if there is a possibility that the method has a loop (a back edge is present). // This function doesn't depend on any previous loop computations, including predecessors. It looks for any // lexical back edge to a block previously seen in a forward walk of the block list. // // As it walks all blocks and all successors of each block (including EH successors), it is not cheap. // It returns as soon as any possible loop is discovered. // // Return Value: // true if there might be a loop // bool Compiler::fgMightHaveLoop() { // Don't use a BlockSet for this temporary bitset of blocks: we don't want to have to call EnsureBasicBlockEpoch() // and potentially change the block epoch. BitVecTraits blockVecTraits(fgBBNumMax + 1, this); BitVec blocksSeen(BitVecOps::MakeEmpty(&blockVecTraits)); for (BasicBlock* const block : Blocks()) { BitVecOps::AddElemD(&blockVecTraits, blocksSeen, block->bbNum); for (BasicBlock* const succ : block->GetAllSuccs(this)) { if (BitVecOps::IsMember(&blockVecTraits, blocksSeen, succ->bbNum)) { return true; } } } return false; } /***************************************************************************** * * Insert a BasicBlock before the given block. 
*/ BasicBlock* Compiler::fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion) { // Create a new BasicBlock and chain it in BasicBlock* newBlk = bbNewBasicBlock(jumpKind); newBlk->bbFlags |= BBF_INTERNAL; fgInsertBBbefore(block, newBlk); newBlk->bbRefs = 0; if (newBlk->bbFallsThrough() && block->isRunRarely()) { newBlk->bbSetRunRarely(); } if (extendRegion) { fgExtendEHRegionBefore(block); } else { // When extendRegion is false the caller is responsible for setting these two values newBlk->setTryIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely newBlk->setHndIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely } // We assume that if the block we are inserting before is in the cold region, then this new // block will also be in the cold region. newBlk->bbFlags |= (block->bbFlags & BBF_COLD); return newBlk; } /***************************************************************************** * * Insert a BasicBlock after the given block. */ BasicBlock* Compiler::fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion) { // Create a new BasicBlock and chain it in BasicBlock* newBlk = bbNewBasicBlock(jumpKind); newBlk->bbFlags |= BBF_INTERNAL; fgInsertBBafter(block, newBlk); newBlk->bbRefs = 0; if (block->bbFallsThrough() && block->isRunRarely()) { newBlk->bbSetRunRarely(); } if (extendRegion) { fgExtendEHRegionAfter(block); } else { // When extendRegion is false the caller is responsible for setting these two values newBlk->setTryIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely newBlk->setHndIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely } // If the new block is in the cold region (because the block we are inserting after // is in the cold region), mark it as such. newBlk->bbFlags |= (block->bbFlags & BBF_COLD); return newBlk; } /***************************************************************************** * Inserts basic block before existing basic block. * * If insertBeforeBlk is in the funclet region, then newBlk will be in the funclet region. * (If insertBeforeBlk is the first block of the funclet region, then 'newBlk' will be the * new first block of the funclet region.) */ void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk) { if (insertBeforeBlk->bbPrev) { fgInsertBBafter(insertBeforeBlk->bbPrev, newBlk); } else { newBlk->setNext(fgFirstBB); fgFirstBB = newBlk; newBlk->bbPrev = nullptr; } #if defined(FEATURE_EH_FUNCLETS) /* Update fgFirstFuncletBB if insertBeforeBlk is the first block of the funclet region. */ if (fgFirstFuncletBB == insertBeforeBlk) { fgFirstFuncletBB = newBlk; } #endif // FEATURE_EH_FUNCLETS } /***************************************************************************** * Inserts basic block after existing basic block. * * If insertBeforeBlk is in the funclet region, then newBlk will be in the funclet region. * (It can't be used to insert a block as the first block of the funclet region). */ void Compiler::fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk) { newBlk->bbNext = insertAfterBlk->bbNext; if (insertAfterBlk->bbNext) { insertAfterBlk->bbNext->bbPrev = newBlk; } insertAfterBlk->bbNext = newBlk; newBlk->bbPrev = insertAfterBlk; if (fgLastBB == insertAfterBlk) { fgLastBB = newBlk; assert(fgLastBB->bbNext == nullptr); } } // We have two edges (bAlt => bCur) and (bCur => bNext). // // Returns true if the weight of (bAlt => bCur) // is greater than the weight of (bCur => bNext). 
// We compare the edge weights if we have valid edge weights // otherwise we compare blocks weights. // bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt) { // bCur can't be NULL and must be a fall through bbJumpKind noway_assert(bCur != nullptr); noway_assert(bCur->bbFallsThrough()); noway_assert(bAlt != nullptr); // We only handle the cases when bAlt is a BBJ_ALWAYS or a BBJ_COND if (!bAlt->KindIs(BBJ_ALWAYS, BBJ_COND)) { return false; } // if bAlt doesn't jump to bCur it can't be a better fall through than bCur if (bAlt->bbJumpDest != bCur) { return false; } // Currently bNext is the fall through for bCur BasicBlock* bNext = bCur->bbNext; noway_assert(bNext != nullptr); // We will set result to true if bAlt is a better fall through than bCur bool result; if (fgHaveValidEdgeWeights) { // We will compare the edge weight for our two choices flowList* edgeFromAlt = fgGetPredForBlock(bCur, bAlt); flowList* edgeFromCur = fgGetPredForBlock(bNext, bCur); noway_assert(edgeFromCur != nullptr); noway_assert(edgeFromAlt != nullptr); result = (edgeFromAlt->edgeWeightMin() > edgeFromCur->edgeWeightMax()); } else { if (bAlt->bbJumpKind == BBJ_ALWAYS) { // Our result is true if bAlt's weight is more than bCur's weight result = (bAlt->bbWeight > bCur->bbWeight); } else { noway_assert(bAlt->bbJumpKind == BBJ_COND); // Our result is true if bAlt's weight is more than twice bCur's weight result = (bAlt->bbWeight > (2 * bCur->bbWeight)); } } return result; } //------------------------------------------------------------------------ // Finds the block closest to endBlk in the range [startBlk..endBlk) after which a block can be // inserted easily. Note that endBlk cannot be returned; its predecessor is the last block that can // be returned. The new block will be put in an EH region described by the arguments regionIndex, // putInTryRegion, startBlk, and endBlk (explained below), so it must be legal to place to put the // new block after the insertion location block, give it the specified EH region index, and not break // EH nesting rules. This function is careful to choose a block in the correct EH region. However, // it assumes that the new block can ALWAYS be placed at the end (just before endBlk). That means // that the caller must ensure that is true. // // Below are the possible cases for the arguments to this method: // 1. putInTryRegion == true and regionIndex > 0: // Search in the try region indicated by regionIndex. // 2. putInTryRegion == false and regionIndex > 0: // a. If startBlk is the first block of a filter and endBlk is the block after the end of the // filter (that is, the startBlk and endBlk match a filter bounds exactly), then choose a // location within this filter region. (Note that, due to IL rules, filters do not have any // EH nested within them.) Otherwise, filters are skipped. // b. Else, search in the handler region indicated by regionIndex. // 3. regionIndex = 0: // Search in the entire main method, excluding all EH regions. In this case, putInTryRegion must be true. // // This method makes sure to find an insertion point which would not cause the inserted block to // be put inside any inner try/filter/handler regions. // // The actual insertion occurs after the returned block. Note that the returned insertion point might // be the last block of a more nested EH region, because the new block will be inserted after the insertion // point, and will not extend the more nested EH region. 
For example: // // try3 try2 try1 // |--- | | BB01 // | |--- | BB02 // | | |--- BB03 // | | | BB04 // | |--- |--- BB05 // | BB06 // |----------------- BB07 // // for regionIndex==try3, putInTryRegion==true, we might return BB05, even though BB05 will have a try index // for try1 (the most nested 'try' region the block is in). That's because when we insert after BB05, the new // block will be in the correct, desired EH region, since try1 and try2 regions will not be extended to include // the inserted block. Furthermore, for regionIndex==try2, putInTryRegion==true, we can also return BB05. In this // case, when the new block is inserted, the try1 region remains the same, but we need extend region 'try2' to // include the inserted block. (We also need to check all parent regions as well, just in case any parent regions // also end on the same block, in which case we would also need to extend the parent regions. This is standard // procedure when inserting a block at the end of an EH region.) // // If nearBlk is non-nullptr then we return the closest block after nearBlk that will work best. // // We try to find a block in the appropriate region that is not a fallthrough block, so we can insert after it // without the need to insert a jump around the inserted block. // // Note that regionIndex is numbered the same as BasicBlock::bbTryIndex and BasicBlock::bbHndIndex, that is, "0" is // "main method" and otherwise is +1 from normal, so we can call, e.g., ehGetDsc(tryIndex - 1). // // Arguments: // regionIndex - the region index where the new block will be inserted. Zero means entire method; // non-zero means either a "try" or a "handler" region, depending on what putInTryRegion says. // putInTryRegion - 'true' to put the block in the 'try' region corresponding to 'regionIndex', 'false' // to put the block in the handler region. Should be 'true' if regionIndex==0. // startBlk - start block of range to search. // endBlk - end block of range to search (don't include this block in the range). Can be nullptr to indicate // the end of the function. // nearBlk - If non-nullptr, try to find an insertion location closely after this block. If nullptr, we insert // at the best location found towards the end of the acceptable block range. // jumpBlk - When nearBlk is set, this can be set to the block which jumps to bNext->bbNext (TODO: need to review // this?) // runRarely - true if the block being inserted is expected to be rarely run. This helps determine // the best place to put the new block, by putting in a place that has the same 'rarely run' characteristic. // // Return Value: // A block with the desired characteristics, so the new block will be inserted after this one. // If there is no suitable location, return nullptr. This should basically never happen. // BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, bool putInTryRegion, BasicBlock* startBlk, BasicBlock* endBlk, BasicBlock* nearBlk, BasicBlock* jumpBlk, bool runRarely) { noway_assert(startBlk != nullptr); noway_assert(startBlk != endBlk); noway_assert((regionIndex == 0 && putInTryRegion) || // Search in the main method (putInTryRegion && regionIndex > 0 && startBlk->bbTryIndex == regionIndex) || // Search in the specified try region (!putInTryRegion && regionIndex > 0 && startBlk->bbHndIndex == regionIndex)); // Search in the specified handler region #ifdef DEBUG // Assert that startBlk precedes endBlk in the block list. 
// We don't want to use bbNum to assert this condition, as we cannot depend on the block numbers being // sequential at all times. for (BasicBlock* b = startBlk; b != endBlk; b = b->bbNext) { assert(b != nullptr); // We reached the end of the block list, but never found endBlk. } #endif // DEBUG JITDUMP("fgFindInsertPoint(regionIndex=%u, putInTryRegion=%s, startBlk=" FMT_BB ", endBlk=" FMT_BB ", nearBlk=" FMT_BB ", " "jumpBlk=" FMT_BB ", runRarely=%s)\n", regionIndex, dspBool(putInTryRegion), startBlk->bbNum, (endBlk == nullptr) ? 0 : endBlk->bbNum, (nearBlk == nullptr) ? 0 : nearBlk->bbNum, (jumpBlk == nullptr) ? 0 : jumpBlk->bbNum, dspBool(runRarely)); bool insertingIntoFilter = false; if (!putInTryRegion) { EHblkDsc* const dsc = ehGetDsc(regionIndex - 1); insertingIntoFilter = dsc->HasFilter() && (startBlk == dsc->ebdFilter) && (endBlk == dsc->ebdHndBeg); } bool reachedNear = false; // Have we reached 'nearBlk' in our search? If not, we'll keep searching. bool inFilter = false; // Are we in a filter region that we need to skip? BasicBlock* bestBlk = nullptr; // Set to the best insertion point we've found so far that meets all the EH requirements. BasicBlock* goodBlk = nullptr; // Set to an acceptable insertion point that we'll use if we don't find a 'best' option. BasicBlock* blk; if (nearBlk != nullptr) { // Does the nearBlk precede the startBlk? for (blk = nearBlk; blk != nullptr; blk = blk->bbNext) { if (blk == startBlk) { reachedNear = true; break; } else if (blk == endBlk) { break; } } } for (blk = startBlk; blk != endBlk; blk = blk->bbNext) { // The only way (blk == nullptr) could be true is if the caller passed an endBlk that preceded startBlk in the // block list, or if endBlk isn't in the block list at all. In DEBUG, we'll instead hit the similar // well-formedness assert earlier in this function. noway_assert(blk != nullptr); if (blk == nearBlk) { reachedNear = true; } if (blk->bbCatchTyp == BBCT_FILTER) { // Record the fact that we entered a filter region, so we don't insert into filters... // Unless the caller actually wanted the block inserted in this exact filter region. if (!insertingIntoFilter || (blk != startBlk)) { inFilter = true; } } else if (blk->bbCatchTyp == BBCT_FILTER_HANDLER) { // Record the fact that we exited a filter region. inFilter = false; } // Don't insert a block inside this filter region. if (inFilter) { continue; } // Note that the new block will be inserted AFTER "blk". We check to make sure that doing so // would put the block in the correct EH region. We make an assumption here that you can // ALWAYS insert the new block before "endBlk" (that is, at the end of the search range) // and be in the correct EH region. This is must be guaranteed by the caller (as it is by // fgNewBBinRegion(), which passes the search range as an exact EH region block range). // Because of this assumption, we only check the EH information for blocks before the last block. if (blk->bbNext != endBlk) { // We are in the middle of the search range. We can't insert the new block in // an inner try or handler region. We can, however, set the insertion // point to the last block of an EH try/handler region, if the enclosing // region is the region we wish to insert in. (Since multiple regions can // end at the same block, we need to search outwards, checking that the // block is the last block of every EH region out to the region we want // to insert in.) 
This is especially useful for putting a call-to-finally // block on AMD64 immediately after its corresponding 'try' block, so in the // common case, we'll just fall through to it. For example: // // BB01 // BB02 -- first block of try // BB03 // BB04 -- last block of try // BB05 -- first block of finally // BB06 // BB07 -- last block of handler // BB08 // // Assume there is only one try/finally, so BB01 and BB08 are in the "main function". // For AMD64 call-to-finally, we'll want to insert the BBJ_CALLFINALLY in // the main function, immediately after BB04. This allows us to do that. if (!fgCheckEHCanInsertAfterBlock(blk, regionIndex, putInTryRegion)) { // Can't insert here. continue; } } // Look for an insert location: // 1. We want blocks that don't end with a fall through, // 2. Also, when blk equals nearBlk we may want to insert here. if (!blk->bbFallsThrough() || (blk == nearBlk)) { bool updateBestBlk = true; // We will probably update the bestBlk // If blk falls through then we must decide whether to use the nearBlk // hint if (blk->bbFallsThrough()) { noway_assert(blk == nearBlk); if (jumpBlk != nullptr) { updateBestBlk = fgIsBetterFallThrough(blk, jumpBlk); } else { updateBestBlk = false; } } // If we already have a best block, see if the 'runRarely' flags influences // our choice. If we want a runRarely insertion point, and the existing best // block is run rarely but the current block isn't run rarely, then don't // update the best block. // TODO-CQ: We should also handle the reverse case, where runRarely is false (we // want a non-rarely-run block), but bestBlock->isRunRarely() is true. In that // case, we should update the block, also. Probably what we want is: // (bestBlk->isRunRarely() != runRarely) && (blk->isRunRarely() == runRarely) if (updateBestBlk && (bestBlk != nullptr) && runRarely && bestBlk->isRunRarely() && !blk->isRunRarely()) { updateBestBlk = false; } if (updateBestBlk) { // We found a 'best' insertion location, so save it away. bestBlk = blk; // If we've reached nearBlk, we've satisfied all the criteria, // so we're done. if (reachedNear) { goto DONE; } // If we haven't reached nearBlk, keep looking for a 'best' location, just // in case we'll find one at or after nearBlk. If no nearBlk was specified, // we prefer inserting towards the end of the given range, so keep looking // for more acceptable insertion locations. } } // No need to update goodBlk after we have set bestBlk, but we could still find a better // bestBlk, so keep looking. if (bestBlk != nullptr) { continue; } // Set the current block as a "good enough" insertion point, if it meets certain criteria. // We'll return this block if we don't find a "best" block in the search range. The block // can't be a BBJ_CALLFINALLY of a BBJ_CALLFINALLY/BBJ_ALWAYS pair (since we don't want // to insert anything between these two blocks). Otherwise, we can use it. However, // if we'd previously chosen a BBJ_COND block, then we'd prefer the "good" block to be // something else. We keep updating it until we've reached the 'nearBlk', to push it as // close to endBlk as possible. if (!blk->isBBCallAlwaysPair()) { if (goodBlk == nullptr) { goodBlk = blk; } else if ((goodBlk->bbJumpKind == BBJ_COND) || (blk->bbJumpKind != BBJ_COND)) { if ((blk == nearBlk) || !reachedNear) { goodBlk = blk; } } } } // If we didn't find a non-fall_through block, then insert at the last good block. 
    if (bestBlk == nullptr)
    {
        bestBlk = goodBlk;
    }

DONE:

#if defined(JIT32_GCENCODER)
    // If we are inserting into a filter and the best block is the end of the filter region, we need to
    // insert after its predecessor instead: the JIT32 GC encoding used by the x86 CLR ABI states that the
    // terminal block of a filter region is its exit block. If the filter region consists of a single block,
    // a new block cannot be inserted without either splitting the single block before inserting a new block
    // or inserting the new block before the single block and updating the filter description such that the
    // inserted block is marked as the entry block for the filter. Because this sort of split can be complex
    // (especially given that it must ensure that the liveness of the exception object is properly tracked),
    // we avoid this situation by never generating single-block filters on x86 (see impPushCatchArgOnStack).
    if (insertingIntoFilter && (bestBlk == endBlk->bbPrev))
    {
        assert(bestBlk != startBlk);
        bestBlk = bestBlk->bbPrev;
    }
#endif // defined(JIT32_GCENCODER)

    return bestBlk;
}

//------------------------------------------------------------------------
// Creates a new BasicBlock and inserts it in a specific EH region, given by 'tryIndex', 'hndIndex', and 'putInFilter'.
//
// If 'putInFilter' is true, then the block is inserted in the filter region given by 'hndIndex'. In this case, tryIndex
// must be a less nested EH region (that is, tryIndex > hndIndex).
//
// Otherwise, the block is inserted in either the try region or the handler region, depending on which one is the inner
// region. In other words, if the try region indicated by tryIndex is nested in the handler region indicated by
// hndIndex, then the new BB will be created in the try region. Vice versa.
//
// Note that tryIndex and hndIndex are numbered the same as BasicBlock::bbTryIndex and BasicBlock::bbHndIndex, that is,
// "0" is "main method" and otherwise is +1 from normal, so we can call, e.g., ehGetDsc(tryIndex - 1).
//
// To be more specific, this function will create a new BB in one of the following 5 regions (if putInFilter is false):
// 1. When tryIndex = 0 and hndIndex = 0:
//    The new BB will be created in the method region.
// 2. When tryIndex != 0 and hndIndex = 0:
//    The new BB will be created in the try region indicated by tryIndex.
// 3. When tryIndex == 0 and hndIndex != 0:
//    The new BB will be created in the handler region indicated by hndIndex.
// 4. When tryIndex != 0 and hndIndex != 0 and tryIndex < hndIndex:
//    In this case, the try region is nested inside the handler region. Therefore, the new BB will be created
//    in the try region indicated by tryIndex.
// 5. When tryIndex != 0 and hndIndex != 0 and tryIndex > hndIndex:
//    In this case, the handler region is nested inside the try region. Therefore, the new BB will be created
//    in the handler region indicated by hndIndex.
//
// Note that if tryIndex != 0 and hndIndex != 0 then tryIndex must not be equal to hndIndex (this makes sense because
// if they are equal, you are asking to put the new block in both the try and handler, which is impossible).
//
// The BasicBlock will not be inserted inside an EH region that is more nested than the requested tryIndex/hndIndex
// region (so the function is careful to skip more nested EH regions when searching for a place to put the new block).
//
// This function cannot be used to insert a block as the first block of any region. It always inserts a block after
// an existing block in the given region.
// // If nearBlk is nullptr, or the block is run rarely, then the new block is assumed to be run rarely. // // Arguments: // jumpKind - the jump kind of the new block to create. // tryIndex - the try region to insert the new block in, described above. This must be a number in the range // [0..compHndBBtabCount]. // hndIndex - the handler region to insert the new block in, described above. This must be a number in the range // [0..compHndBBtabCount]. // nearBlk - insert the new block closely after this block, if possible. If nullptr, put the new block anywhere // in the requested region. // putInFilter - put the new block in the filter region given by hndIndex, as described above. // runRarely - 'true' if the new block is run rarely. // insertAtEnd - 'true' if the block should be inserted at the end of the region. Note: this is currently only // implemented when inserting into the main function (not into any EH region). // // Return Value: // The new block. BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, bool putInFilter /* = false */, bool runRarely /* = false */, bool insertAtEnd /* = false */) { assert(tryIndex <= compHndBBtabCount); assert(hndIndex <= compHndBBtabCount); /* afterBlk is the block which will precede the newBB */ BasicBlock* afterBlk; // start and end limit for inserting the block BasicBlock* startBlk = nullptr; BasicBlock* endBlk = nullptr; bool putInTryRegion = true; unsigned regionIndex = 0; // First, figure out which region (the "try" region or the "handler" region) to put the newBB in. if ((tryIndex == 0) && (hndIndex == 0)) { assert(!putInFilter); endBlk = fgEndBBAfterMainFunction(); // don't put new BB in funclet region if (insertAtEnd || (nearBlk == nullptr)) { /* We'll just insert the block at the end of the method, before the funclets */ afterBlk = fgLastBBInMainFunction(); goto _FoundAfterBlk; } else { // We'll search through the entire method startBlk = fgFirstBB; } noway_assert(regionIndex == 0); } else { noway_assert(tryIndex > 0 || hndIndex > 0); PREFIX_ASSUME(tryIndex <= compHndBBtabCount); PREFIX_ASSUME(hndIndex <= compHndBBtabCount); // Decide which region to put in, the "try" region or the "handler" region. if (tryIndex == 0) { noway_assert(hndIndex > 0); putInTryRegion = false; } else if (hndIndex == 0) { noway_assert(tryIndex > 0); noway_assert(putInTryRegion); assert(!putInFilter); } else { noway_assert(tryIndex > 0 && hndIndex > 0 && tryIndex != hndIndex); putInTryRegion = (tryIndex < hndIndex); } if (putInTryRegion) { // Try region is the inner region. // In other words, try region must be nested inside the handler region. noway_assert(hndIndex == 0 || bbInHandlerRegions(hndIndex - 1, ehGetDsc(tryIndex - 1)->ebdTryBeg)); assert(!putInFilter); } else { // Handler region is the inner region. // In other words, handler region must be nested inside the try region. noway_assert(tryIndex == 0 || bbInTryRegions(tryIndex - 1, ehGetDsc(hndIndex - 1)->ebdHndBeg)); } // Figure out the start and end block range to search for an insertion location. Pick the beginning and // ending blocks of the target EH region (the 'endBlk' is one past the last block of the EH region, to make // loop iteration easier). Note that, after funclets have been created (for FEATURE_EH_FUNCLETS), // this linear block range will not include blocks of handlers for try/handler clauses nested within // this EH region, as those blocks have been extracted as funclets. 
That is ok, though, because we don't // want to insert a block in any nested EH region. if (putInTryRegion) { // We will put the newBB in the try region. EHblkDsc* ehDsc = ehGetDsc(tryIndex - 1); startBlk = ehDsc->ebdTryBeg; endBlk = ehDsc->ebdTryLast->bbNext; regionIndex = tryIndex; } else if (putInFilter) { // We will put the newBB in the filter region. EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1); startBlk = ehDsc->ebdFilter; endBlk = ehDsc->ebdHndBeg; regionIndex = hndIndex; } else { // We will put the newBB in the handler region. EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1); startBlk = ehDsc->ebdHndBeg; endBlk = ehDsc->ebdHndLast->bbNext; regionIndex = hndIndex; } noway_assert(regionIndex > 0); } // Now find the insertion point. afterBlk = fgFindInsertPoint(regionIndex, putInTryRegion, startBlk, endBlk, nearBlk, nullptr, runRarely); _FoundAfterBlk:; /* We have decided to insert the block after 'afterBlk'. */ noway_assert(afterBlk != nullptr); JITDUMP("fgNewBBinRegion(jumpKind=%u, tryIndex=%u, hndIndex=%u, putInFilter=%s, runRarely=%s, insertAtEnd=%s): " "inserting after " FMT_BB "\n", jumpKind, tryIndex, hndIndex, dspBool(putInFilter), dspBool(runRarely), dspBool(insertAtEnd), afterBlk->bbNum); return fgNewBBinRegionWorker(jumpKind, afterBlk, regionIndex, putInTryRegion); } //------------------------------------------------------------------------ // Creates a new BasicBlock and inserts it in the same EH region as 'srcBlk'. // // See the implementation of fgNewBBinRegion() used by this one for more notes. // // Arguments: // jumpKind - the jump kind of the new block to create. // srcBlk - insert the new block in the same EH region as this block, and closely after it if possible. // // Return Value: // The new block. BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind, BasicBlock* srcBlk, bool runRarely /* = false */, bool insertAtEnd /* = false */) { assert(srcBlk != nullptr); const unsigned tryIndex = srcBlk->bbTryIndex; const unsigned hndIndex = srcBlk->bbHndIndex; bool putInFilter = false; // Check to see if we need to put the new block in a filter. We do if srcBlk is in a filter. // This can only be true if there is a handler index, and the handler region is more nested than the // try region (if any). This is because no EH regions can be nested within a filter. if (BasicBlock::ehIndexMaybeMoreNested(hndIndex, tryIndex)) { assert(hndIndex != 0); // If hndIndex is more nested, we must be in some handler! putInFilter = ehGetDsc(hndIndex - 1)->InFilterRegionBBRange(srcBlk); } return fgNewBBinRegion(jumpKind, tryIndex, hndIndex, srcBlk, putInFilter, runRarely, insertAtEnd); } //------------------------------------------------------------------------ // Creates a new BasicBlock and inserts it at the end of the function. // // See the implementation of fgNewBBinRegion() used by this one for more notes. // // Arguments: // jumpKind - the jump kind of the new block to create. // // Return Value: // The new block. BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind) { return fgNewBBinRegion(jumpKind, 0, 0, nullptr, /* putInFilter */ false, /* runRarely */ false, /* insertAtEnd */ true); } //------------------------------------------------------------------------ // Creates a new BasicBlock, and inserts it after 'afterBlk'. // // The block cannot be inserted into a more nested try/handler region than that specified by 'regionIndex'. // (It is given exactly 'regionIndex'.) Thus, the parameters must be passed to ensure proper EH nesting // rules are followed. 
// // Arguments: // jumpKind - the jump kind of the new block to create. // afterBlk - insert the new block after this one. // regionIndex - the block will be put in this EH region. // putInTryRegion - If true, put the new block in the 'try' region corresponding to 'regionIndex', and // set its handler index to the most nested handler region enclosing that 'try' region. // Otherwise, put the block in the handler region specified by 'regionIndex', and set its 'try' // index to the most nested 'try' region enclosing that handler region. // // Return Value: // The new block. BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind, BasicBlock* afterBlk, unsigned regionIndex, bool putInTryRegion) { /* Insert the new block */ BasicBlock* afterBlkNext = afterBlk->bbNext; (void)afterBlkNext; // prevent "unused variable" error from GCC BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false); if (putInTryRegion) { noway_assert(regionIndex <= MAX_XCPTN_INDEX); newBlk->bbTryIndex = (unsigned short)regionIndex; newBlk->bbHndIndex = bbFindInnermostHandlerRegionContainingTryRegion(regionIndex); } else { newBlk->bbTryIndex = bbFindInnermostTryRegionContainingHandlerRegion(regionIndex); noway_assert(regionIndex <= MAX_XCPTN_INDEX); newBlk->bbHndIndex = (unsigned short)regionIndex; } // We're going to compare for equal try regions (to handle the case of 'mutually protect' // regions). We need to save off the current try region, otherwise we might change it // before it gets compared later, thereby making future comparisons fail. BasicBlock* newTryBeg; BasicBlock* newTryLast; (void)ehInitTryBlockRange(newBlk, &newTryBeg, &newTryLast); unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Is afterBlk at the end of a try region? if (HBtab->ebdTryLast == afterBlk) { noway_assert(afterBlkNext == newBlk->bbNext); bool extendTryRegion = false; if (newBlk->hasTryIndex()) { // We're adding a block after the last block of some try region. Do // we extend the try region to include the block, or not? // If the try region is exactly the same as the try region // associated with the new block (based on the block's try index, // which represents the innermost try the block is a part of), then // we extend it. // If the try region is a "parent" try region -- an enclosing try region // that has the same last block as the new block's try region -- then // we also extend. For example: // try { // 1 // ... // try { // 2 // ... // } /* 2 */ } /* 1 */ // This example is meant to indicate that both try regions 1 and 2 end at // the same block, and we're extending 2. Thus, we must also extend 1. If we // only extended 2, we would break proper nesting. (Dev11 bug 137967) extendTryRegion = HBtab->ebdIsSameTry(newTryBeg, newTryLast) || bbInTryRegions(XTnum, newBlk); } // Does newBlk extend this try region? if (extendTryRegion) { // Yes, newBlk extends this try region // newBlk is the now the new try last block fgSetTryEnd(HBtab, newBlk); } } // Is afterBlk at the end of a handler region? if (HBtab->ebdHndLast == afterBlk) { noway_assert(afterBlkNext == newBlk->bbNext); // Does newBlk extend this handler region? bool extendHndRegion = false; if (newBlk->hasHndIndex()) { // We're adding a block after the last block of some handler region. Do // we extend the handler region to include the block, or not? 
                // If the handler region is exactly the same as the handler region
                // associated with the new block (based on the block's handler index,
                // which represents the innermost handler the block is a part of), then
                // we extend it.
                // If the handler region is a "parent" handler region -- an enclosing
                // handler region that has the same last block as the new block's handler
                // region -- then we also extend. For example:
                //      catch { // 1
                //          ...
                //          catch { // 2
                //          ...
                //      } /* 2 */ } /* 1 */
                // This example is meant to indicate that both handler regions 1 and 2 end at
                // the same block, and we're extending 2. Thus, we must also extend 1. If we
                // only extended 2, we would break proper nesting. (Dev11 bug 372051)
                extendHndRegion = bbInHandlerRegions(XTnum, newBlk);
            }

            if (extendHndRegion)
            {
                // Yes, newBlk extends this handler region
                // newBlk is now the last block of the handler.
                fgSetHndEnd(HBtab, newBlk);
            }
        }
    }

    /* If afterBlk falls through, we insert a jump around newBlk */
    fgConnectFallThrough(afterBlk, newBlk->bbNext);

#ifdef DEBUG
    fgVerifyHandlerTab();
#endif

    return newBlk;
}

//------------------------------------------------------------------------
// fgUseThrowHelperBlocks: Determine whether the compiler should use shared throw helper blocks.
//
// Note:
//   For debuggable code, codegen will generate the 'throw' code inline.
// Return Value:
//    true if 'throw' helper blocks should be created.
bool Compiler::fgUseThrowHelperBlocks()
{
    return !opts.compDbgCode;
}
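//------------------------------------------------------------------------
// Illustrative sketch (not part of the JIT sources): the snippet below shows one way a
// caller might combine the insertion helpers defined above to place a rarely-run block
// in the same EH region as an existing block. The helper name
// 'fgExampleInsertRarelyRunBlock', the 'srcBlk' parameter, and the BBJ_THROW jump kind
// are hypothetical and exist only for illustration; the calls themselves
// (fgUseThrowHelperBlocks, fgNewBBinRegion, fgAddRefPred) are the ones defined in this
// file. The sketch is kept under '#if 0' so it does not participate in the build.
#if 0
BasicBlock* Compiler::fgExampleInsertRarelyRunBlock(BasicBlock* srcBlk)
{
    // For debuggable code the 'throw' is generated inline, so a shared helper
    // block would not be used (see fgUseThrowHelperBlocks above).
    if (!fgUseThrowHelperBlocks())
    {
        return nullptr;
    }

    // Create the new block in the same try/handler (or filter) region as 'srcBlk',
    // near the end of that region, and mark it as rarely run. The overload taking a
    // source block computes tryIndex/hndIndex/putInFilter on the caller's behalf.
    BasicBlock* newBlk = fgNewBBinRegion(BBJ_THROW, srcBlk, /* runRarely */ true, /* insertAtEnd */ true);

    // If predecessor lists are being maintained, record the new edge from the block
    // that will branch to the helper.
    fgAddRefPred(newBlk, srcBlk);

    return newBlk;
}
#endif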
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif // Flowgraph Construction and Maintenance void Compiler::fgInit() { impInit(); /* Initialization for fgWalkTreePre() and fgWalkTreePost() */ fgFirstBBScratch = nullptr; #ifdef DEBUG fgPrintInlinedMethods = false; #endif // DEBUG /* We haven't yet computed the bbPreds lists */ fgComputePredsDone = false; /* We haven't yet computed the bbCheapPreds lists */ fgCheapPredsValid = false; /* We haven't yet computed the edge weight */ fgEdgeWeightsComputed = false; fgHaveValidEdgeWeights = false; fgSlopUsedInEdgeWeights = false; fgRangeUsedInEdgeWeights = true; fgNeedsUpdateFlowGraph = false; fgCalledCount = BB_ZERO_WEIGHT; /* We haven't yet computed the dominator sets */ fgDomsComputed = false; fgReturnBlocksComputed = false; #ifdef DEBUG fgReachabilitySetsValid = false; #endif // DEBUG /* We don't know yet which loops will always execute calls */ fgLoopCallMarked = false; /* Initialize the basic block list */ fgFirstBB = nullptr; fgLastBB = nullptr; fgFirstColdBlock = nullptr; fgEntryBB = nullptr; fgOSREntryBB = nullptr; #if defined(FEATURE_EH_FUNCLETS) fgFirstFuncletBB = nullptr; fgFuncletsCreated = false; #endif // FEATURE_EH_FUNCLETS fgBBcount = 0; #ifdef DEBUG fgBBcountAtCodegen = 0; #endif // DEBUG fgBBNumMax = 0; fgEdgeCount = 0; fgDomBBcount = 0; fgBBVarSetsInited = false; fgReturnCount = 0; // Initialize BlockSet data. fgCurBBEpoch = 0; fgCurBBEpochSize = 0; fgBBSetCountInSizeTUnits = 0; genReturnBB = nullptr; genReturnLocal = BAD_VAR_NUM; /* We haven't reached the global morphing phase */ fgGlobalMorph = false; fgModified = false; #ifdef DEBUG fgSafeBasicBlockCreation = true; #endif // DEBUG fgLocalVarLivenessDone = false; /* Statement list is not threaded yet */ fgStmtListThreaded = false; // Initialize the logic for adding code. This is used to insert code such // as the code that raises an exception when an array range check fails. fgAddCodeList = nullptr; fgAddCodeModf = false; for (int i = 0; i < SCK_COUNT; i++) { fgExcptnTargetCache[i] = nullptr; } /* Keep track of the max count of pointer arguments */ fgPtrArgCntMax = 0; /* This global flag is set whenever we remove a statement */ fgStmtRemoved = false; /* This global flag is set whenever we add a throw block for a RngChk */ fgRngChkThrowAdded = false; /* reset flag for fgIsCodeAdded() */ /* Keep track of whether or not EH statements have been optimized */ fgOptimizedFinally = false; /* We will record a list of all BBJ_RETURN blocks here */ fgReturnBlocks = nullptr; /* This is set by fgComputeReachability */ fgEnterBlks = BlockSetOps::UninitVal(); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) fgAlwaysBlks = BlockSetOps::UninitVal(); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG fgEnterBlksSetValid = false; #endif // DEBUG #if !defined(FEATURE_EH_FUNCLETS) ehMaxHndNestingCount = 0; #endif // !FEATURE_EH_FUNCLETS /* Init the fgBigOffsetMorphingTemps to be BAD_VAR_NUM. 
*/

    for (int i = 0; i < TYP_COUNT; i++)
    {
        fgBigOffsetMorphingTemps[i] = BAD_VAR_NUM;
    }

    fgNoStructPromotion      = false;
    fgNoStructParamPromotion = false;

    optValnumCSE_phase = false; // referenced in fgMorphSmpOp()

#ifdef DEBUG
    fgNormalizeEHDone = false;
#endif // DEBUG

#ifdef DEBUG
    if (!compIsForInlining())
    {
        const int noStructPromotionValue = JitConfig.JitNoStructPromotion();
        assert(0 <= noStructPromotionValue && noStructPromotionValue <= 2);
        if (noStructPromotionValue == 1)
        {
            fgNoStructPromotion = true;
        }
        if (noStructPromotionValue == 2)
        {
            fgNoStructParamPromotion = true;
        }
    }
#endif // DEBUG

    if (!compIsForInlining())
    {
        m_promotedStructDeathVars = nullptr;
    }
#ifdef FEATURE_SIMD
    fgPreviousCandidateSIMDFieldAsgStmt = nullptr;
#endif

    fgHasSwitch                  = false;
    fgPgoDisabled                = false;
    fgPgoSchema                  = nullptr;
    fgPgoData                    = nullptr;
    fgPgoSchemaCount             = 0;
    fgNumProfileRuns             = 0;
    fgPgoBlockCounts             = 0;
    fgPgoEdgeCounts              = 0;
    fgPgoClassProfiles           = 0;
    fgPgoInlineePgo              = 0;
    fgPgoInlineeNoPgo            = 0;
    fgPgoInlineeNoPgoSingleBlock = 0;
    fgCountInstrumentor          = nullptr;
    fgClassInstrumentor          = nullptr;
    fgPredListSortVector         = nullptr;
}

/*****************************************************************************
 *
 *  Create a basic block and append it to the current BB list.
 */

BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind)
{
    // This method must not be called after the exception table has been
    // constructed, because it does not provide support for patching
    // the exception table.
    noway_assert(compHndBBtabCount == 0);

    BasicBlock* block;

    /* Allocate the block descriptor */

    block = bbNewBasicBlock(jumpKind);
    noway_assert(block->bbJumpKind == jumpKind);

    /* Append the block to the end of the global basic block list */

    if (fgFirstBB)
    {
        fgLastBB->setNext(block);
    }
    else
    {
        fgFirstBB     = block;
        block->bbPrev = nullptr;
    }

    fgLastBB = block;

    return block;
}

//------------------------------------------------------------------------
// fgEnsureFirstBBisScratch: Ensure that fgFirstBB is a scratch BasicBlock
//
// Returns:
//   Nothing. May allocate a new block and alter the value of fgFirstBB.
//
// Notes:
//   This should be called before adding on-entry initialization code to
//   the method, to ensure that fgFirstBB is not part of a loop.
//
//   Does nothing if fgFirstBB is already a scratch BB. After calling this,
//   fgFirstBB may already contain code. Callers have to be careful
//   that they do not mess up the order of things added to this block and
//   inadvertently change semantics.
//
//   We maintain the invariant that a scratch BB ends with BBJ_NONE or
//   BBJ_ALWAYS, so that when adding independent bits of initialization,
//   callers can generally append to the fgFirstBB block without worrying
//   about what code is there already.
//
//   Can be called at any time, and can be called multiple times.
//
void Compiler::fgEnsureFirstBBisScratch()
{
    // Have we already allocated a scratch block?
    if (fgFirstBBisScratch())
    {
        return;
    }

    assert(fgFirstBBScratch == nullptr);

    BasicBlock* block = bbNewBasicBlock(BBJ_NONE);

    if (fgFirstBB != nullptr)
    {
        // If we have profile data the new block will inherit fgFirstBlock's weight
        if (fgFirstBB->hasProfileWeight())
        {
            block->inheritWeight(fgFirstBB);
        }

        // The first block has an implicit ref count which we must
        // remove. Note the ref count could be greater than one, if
        // the first block is not scratch and is targeted by a
        // branch.
assert(fgFirstBB->bbRefs >= 1); fgFirstBB->bbRefs--; // The new scratch bb will fall through to the old first bb fgAddRefPred(fgFirstBB, block); fgInsertBBbefore(fgFirstBB, block); } else { noway_assert(fgLastBB == nullptr); fgFirstBB = block; fgLastBB = block; } noway_assert(fgLastBB != nullptr); // Set the expected flags block->bbFlags |= (BBF_INTERNAL | BBF_IMPORTED); // This new first BB has an implicit ref, and no others. block->bbRefs = 1; fgFirstBBScratch = fgFirstBB; #ifdef DEBUG if (verbose) { printf("New scratch " FMT_BB "\n", block->bbNum); } #endif } //------------------------------------------------------------------------ // fgFirstBBisScratch: Check if fgFirstBB is a scratch block // // Returns: // true if fgFirstBB is a scratch block. // bool Compiler::fgFirstBBisScratch() { if (fgFirstBBScratch != nullptr) { assert(fgFirstBBScratch == fgFirstBB); assert(fgFirstBBScratch->bbFlags & BBF_INTERNAL); assert(fgFirstBBScratch->countOfInEdges() == 1); // Normally, the first scratch block is a fall-through block. However, if the block after it was an empty // BBJ_ALWAYS block, it might get removed, and the code that removes it will make the first scratch block // a BBJ_ALWAYS block. assert(fgFirstBBScratch->KindIs(BBJ_NONE, BBJ_ALWAYS)); return true; } else { return false; } } //------------------------------------------------------------------------ // fgBBisScratch: Check if a given block is a scratch block. // // Arguments: // block - block in question // // Returns: // true if this block is the first block and is a scratch block. // bool Compiler::fgBBisScratch(BasicBlock* block) { return fgFirstBBisScratch() && (block == fgFirstBB); } /* Removes a block from the return block list */ void Compiler::fgRemoveReturnBlock(BasicBlock* block) { if (fgReturnBlocks == nullptr) { return; } if (fgReturnBlocks->block == block) { // It's the 1st entry, assign new head of list. fgReturnBlocks = fgReturnBlocks->next; return; } for (BasicBlockList* retBlocks = fgReturnBlocks; retBlocks->next != nullptr; retBlocks = retBlocks->next) { if (retBlocks->next->block == block) { // Found it; splice it out. retBlocks->next = retBlocks->next->next; return; } } } /***************************************************************************** * fgChangeSwitchBlock: * * We have a BBJ_SWITCH jump at 'oldSwitchBlock' and we want to move this * switch jump over to 'newSwitchBlock'. All of the blocks that are jumped * to from jumpTab[] need to have their predecessor lists updated by removing * the 'oldSwitchBlock' and adding 'newSwitchBlock'. */ void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSwitchBlock) { noway_assert(oldSwitchBlock != nullptr); noway_assert(newSwitchBlock != nullptr); noway_assert(oldSwitchBlock->bbJumpKind == BBJ_SWITCH); // Walk the switch's jump table, updating the predecessor for each branch. for (BasicBlock* const bJump : oldSwitchBlock->SwitchTargets()) { noway_assert(bJump != nullptr); // Note that if there are duplicate branch targets in the switch jump table, // fgRemoveRefPred()/fgAddRefPred() will do the right thing: the second and // subsequent duplicates will simply subtract from and add to the duplicate // count (respectively). if (bJump->countOfInEdges() > 0) { // // Remove the old edge [oldSwitchBlock => bJump] // fgRemoveRefPred(bJump, oldSwitchBlock); } else { // bJump->countOfInEdges() must not be zero after preds are calculated. 
assert(!fgComputePredsDone); } // // Create the new edge [newSwitchBlock => bJump] // fgAddRefPred(bJump, newSwitchBlock); } if (m_switchDescMap != nullptr) { SwitchUniqueSuccSet uniqueSuccSet; // If already computed and cached the unique descriptors for the old block, let's // update those for the new block. if (m_switchDescMap->Lookup(oldSwitchBlock, &uniqueSuccSet)) { m_switchDescMap->Set(newSwitchBlock, uniqueSuccSet, BlockToSwitchDescMap::Overwrite); } else { fgInvalidateSwitchDescMapEntry(newSwitchBlock); } fgInvalidateSwitchDescMapEntry(oldSwitchBlock); } } //------------------------------------------------------------------------ // fgReplaceSwitchJumpTarget: update BBJ_SWITCH block so that all control // that previously flowed to oldTarget now flows to newTarget. // // Arguments: // blockSwitch - block ending in a switch // newTarget - new branch target // oldTarget - old branch target // // Notes: // Updates the jump table and the cached unique target set (if any). // Can be called before or after pred lists are built. // If pred lists are built, updates pred lists. // void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* newTarget, BasicBlock* oldTarget) { noway_assert(blockSwitch != nullptr); noway_assert(newTarget != nullptr); noway_assert(oldTarget != nullptr); noway_assert(blockSwitch->bbJumpKind == BBJ_SWITCH); // For the jump targets values that match oldTarget of our BBJ_SWITCH // replace predecessor 'blockSwitch' with 'newTarget' // unsigned jumpCnt = blockSwitch->bbJumpSwt->bbsCount; BasicBlock** jumpTab = blockSwitch->bbJumpSwt->bbsDstTab; unsigned i = 0; // Walk the switch's jump table looking for blocks to update the preds for while (i < jumpCnt) { if (jumpTab[i] == oldTarget) // We will update when jumpTab[i] matches { // Remove the old edge [oldTarget from blockSwitch] // if (fgComputePredsDone) { fgRemoveAllRefPreds(oldTarget, blockSwitch); } // // Change the jumpTab entry to branch to the new location // jumpTab[i] = newTarget; // // Create the new edge [newTarget from blockSwitch] // flowList* newEdge = nullptr; if (fgComputePredsDone) { newEdge = fgAddRefPred(newTarget, blockSwitch); } // Now set the correct value of newEdge->flDupCount // and replace any other jumps in jumpTab[] that go to oldTarget. // i++; while (i < jumpCnt) { if (jumpTab[i] == oldTarget) { // // We also must update this entry in the jumpTab // jumpTab[i] = newTarget; newTarget->bbRefs++; // // Increment the flDupCount // if (fgComputePredsDone) { newEdge->flDupCount++; } } i++; // Check the next entry in jumpTab[] } // Maintain, if necessary, the set of unique targets of "block." UpdateSwitchTableTarget(blockSwitch, oldTarget, newTarget); return; // We have replaced the jumps to oldTarget with newTarget } i++; // Check the next entry in jumpTab[] for a match } noway_assert(!"Did not find oldTarget in jumpTab[]"); } //------------------------------------------------------------------------ // Compiler::fgReplaceJumpTarget: For a given block, replace the target 'oldTarget' with 'newTarget'. // // Arguments: // block - the block in which a jump target will be replaced. // newTarget - the new branch target of the block. // oldTarget - the old branch target of the block. // // Notes: // 1. Only branches are changed: BBJ_ALWAYS, the non-fallthrough path of BBJ_COND, BBJ_SWITCH, etc. // We ignore other block types. // 2. All branch targets found are updated. 
If there are multiple ways for a block // to reach 'oldTarget' (e.g., multiple arms of a switch), all of them are changed. // 3. The predecessor lists are not changed. // 4. If any switch table entry was updated, the switch table "unique successor" cache is invalidated. // // This function is most useful early, before the full predecessor lists have been computed. // void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, BasicBlock* oldTarget) { assert(block != nullptr); switch (block->bbJumpKind) { case BBJ_CALLFINALLY: case BBJ_COND: case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_EHFILTERRET: case BBJ_LEAVE: // This function will be called before import, so we still have BBJ_LEAVE if (block->bbJumpDest == oldTarget) { block->bbJumpDest = newTarget; } break; case BBJ_NONE: case BBJ_EHFINALLYRET: case BBJ_THROW: case BBJ_RETURN: break; case BBJ_SWITCH: { unsigned const jumpCnt = block->bbJumpSwt->bbsCount; BasicBlock** const jumpTab = block->bbJumpSwt->bbsDstTab; bool changed = false; for (unsigned i = 0; i < jumpCnt; i++) { if (jumpTab[i] == oldTarget) { jumpTab[i] = newTarget; changed = true; } } if (changed) { InvalidateUniqueSwitchSuccMap(); } break; } default: assert(!"Block doesn't have a valid bbJumpKind!!!!"); unreached(); break; } } //------------------------------------------------------------------------ // fgReplacePred: update the predecessor list, swapping one pred for another // // Arguments: // block - block with the pred list we want to update // oldPred - pred currently appearing in block's pred list // newPred - pred that will take oldPred's place. // // Notes: // // A block can only appear once in the preds list (for normal preds, not // cheap preds): if a predecessor has multiple ways to get to this block, then // flDupCount will be >1, but the block will still appear exactly once. Thus, this // function assumes that all branches from the predecessor (practically, that all // switch cases that target this block) are changed to branch from the new predecessor, // with the same dup count. // // Note that the block bbRefs is not changed, since 'block' has the same number of // references as before, just from a different predecessor block. // // Also note this may cause sorting of the pred list. // void Compiler::fgReplacePred(BasicBlock* block, BasicBlock* oldPred, BasicBlock* newPred) { noway_assert(block != nullptr); noway_assert(oldPred != nullptr); noway_assert(newPred != nullptr); assert(!fgCheapPredsValid); bool modified = false; for (flowList* const pred : block->PredEdges()) { if (oldPred == pred->getBlock()) { pred->setBlock(newPred); modified = true; break; } } // We may now need to reorder the pred list. // if (modified) { block->ensurePredListOrder(this); } } /***************************************************************************** * For a block that is in a handler region, find the first block of the most-nested * handler containing the block. */ BasicBlock* Compiler::fgFirstBlockOfHandler(BasicBlock* block) { assert(block->hasHndIndex()); return ehGetDsc(block->getHndIndex())->ebdHndBeg; } /***************************************************************************** * * The following helps find a basic block given its PC offset. 
*/ void Compiler::fgInitBBLookup() { BasicBlock** dscBBptr; /* Allocate the basic block table */ dscBBptr = fgBBs = new (this, CMK_BasicBlock) BasicBlock*[fgBBcount]; /* Walk all the basic blocks, filling in the table */ for (BasicBlock* const block : Blocks()) { *dscBBptr++ = block; } noway_assert(dscBBptr == fgBBs + fgBBcount); } BasicBlock* Compiler::fgLookupBB(unsigned addr) { unsigned lo; unsigned hi; /* Do a binary search */ for (lo = 0, hi = fgBBcount - 1;;) { AGAIN:; if (lo > hi) { break; } unsigned mid = (lo + hi) / 2; BasicBlock* dsc = fgBBs[mid]; // We introduce internal blocks for BBJ_CALLFINALLY. Skip over these. while (dsc->bbFlags & BBF_INTERNAL) { dsc = dsc->bbNext; mid++; // We skipped over too many, Set hi back to the original mid - 1 if (mid > hi) { mid = (lo + hi) / 2; hi = mid - 1; goto AGAIN; } } unsigned pos = dsc->bbCodeOffs; if (pos < addr) { if ((lo == hi) && (lo == (fgBBcount - 1))) { noway_assert(addr == dsc->bbCodeOffsEnd); return nullptr; // NULL means the end of method } lo = mid + 1; continue; } if (pos > addr) { hi = mid - 1; continue; } return dsc; } #ifdef DEBUG printf("ERROR: Couldn't find basic block at offset %04X\n", addr); #endif // DEBUG NO_WAY("fgLookupBB failed."); } //------------------------------------------------------------------------ // FgStack: simple stack model for the inlinee's evaluation stack. // // Model the inputs available to various operations in the inline body. // Tracks constants, arguments, array lengths. class FgStack { public: FgStack() : slot0(SLOT_INVALID), slot1(SLOT_INVALID), depth(0) { // Empty } enum FgSlot { SLOT_INVALID = UINT_MAX, SLOT_UNKNOWN = 0, SLOT_CONSTANT = 1, SLOT_ARRAYLEN = 2, SLOT_ARGUMENT = 3 }; void Clear() { depth = 0; } void PushUnknown() { Push(SLOT_UNKNOWN); } void PushConstant() { Push(SLOT_CONSTANT); } void PushArrayLen() { Push(SLOT_ARRAYLEN); } void PushArgument(unsigned arg) { Push((FgSlot)(SLOT_ARGUMENT + arg)); } FgSlot GetSlot0() const { return depth >= 1 ? slot0 : FgSlot::SLOT_UNKNOWN; } FgSlot GetSlot1() const { return depth >= 2 ? slot1 : FgSlot::SLOT_UNKNOWN; } FgSlot Top(const int n = 0) { if (n == 0) { return depth >= 1 ? slot0 : SLOT_UNKNOWN; } if (n == 1) { return depth == 2 ? 
slot1 : SLOT_UNKNOWN; } unreached(); } static bool IsConstant(FgSlot value) { return value == SLOT_CONSTANT; } static bool IsConstantOrConstArg(FgSlot value, InlineInfo* info) { return IsConstant(value) || IsConstArgument(value, info); } static bool IsArrayLen(FgSlot value) { return value == SLOT_ARRAYLEN; } static bool IsArgument(FgSlot value) { return value >= SLOT_ARGUMENT; } static bool IsConstArgument(FgSlot value, InlineInfo* info) { if ((info == nullptr) || !IsArgument(value)) { return false; } const unsigned argNum = value - SLOT_ARGUMENT; if (argNum < info->argCnt) { return info->inlArgInfo[argNum].argIsInvariant; } return false; } static bool IsExactArgument(FgSlot value, InlineInfo* info) { if ((info == nullptr) || !IsArgument(value)) { return false; } const unsigned argNum = value - SLOT_ARGUMENT; if (argNum < info->argCnt) { return info->inlArgInfo[argNum].argIsExact; } return false; } static unsigned SlotTypeToArgNum(FgSlot value) { assert(IsArgument(value)); return value - SLOT_ARGUMENT; } bool IsStackTwoDeep() const { return depth == 2; } bool IsStackOneDeep() const { return depth == 1; } bool IsStackAtLeastOneDeep() const { return depth >= 1; } void Push(FgSlot slot) { assert(depth <= 2); slot1 = slot0; slot0 = slot; if (depth < 2) { depth++; } } private: FgSlot slot0; FgSlot slot1; unsigned depth; }; void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget) { const BYTE* codeBegp = codeAddr; const BYTE* codeEndp = codeAddr + codeSize; unsigned varNum; var_types varType = DUMMY_INIT(TYP_UNDEF); // TYP_ type typeInfo ti; // Verifier type. bool typeIsNormed = false; FgStack pushedStack; const bool isForceInline = (info.compFlags & CORINFO_FLG_FORCEINLINE) != 0; const bool makeInlineObservations = (compInlineResult != nullptr); const bool isInlining = compIsForInlining(); unsigned retBlocks = 0; int prefixFlags = 0; bool preciseScan = makeInlineObservations && compInlineResult->GetPolicy()->RequiresPreciseScan(); const bool resolveTokens = preciseScan; // Track offsets where IL instructions begin in DEBUG builds. Used to // validate debug info generated by the JIT. assert(codeSize == compInlineContext->GetILSize()); INDEBUG(FixedBitVect* ilInstsSet = FixedBitVect::bitVectInit(codeSize, this)); if (makeInlineObservations) { // Set default values for profile (to avoid NoteFailed in CALLEE_IL_CODE_SIZE's handler) // these will be overridden later. compInlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, true); compInlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, 1.0); // Observe force inline state and code size. compInlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, isForceInline); compInlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize); // Determine if call site is within a try. if (isInlining && impInlineInfo->iciBlock->hasTryIndex()) { compInlineResult->Note(InlineObservation::CALLSITE_IN_TRY_REGION); } // Determine if the call site is in a no-return block if (isInlining && (impInlineInfo->iciBlock->bbJumpKind == BBJ_THROW)) { compInlineResult->Note(InlineObservation::CALLSITE_IN_NORETURN_REGION); } // Determine if the call site is in a loop. if (isInlining && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) { compInlineResult->Note(InlineObservation::CALLSITE_IN_LOOP); } #ifdef DEBUG // If inlining, this method should still be a candidate. 
if (isInlining) { assert(compInlineResult->IsCandidate()); } #endif // DEBUG // note that we're starting to look at the opcodes. compInlineResult->Note(InlineObservation::CALLEE_BEGIN_OPCODE_SCAN); } CORINFO_RESOLVED_TOKEN resolvedToken; OPCODE opcode = CEE_NOP; OPCODE prevOpcode = CEE_NOP; bool handled = false; while (codeAddr < codeEndp) { prevOpcode = opcode; opcode = (OPCODE)getU1LittleEndian(codeAddr); INDEBUG(ilInstsSet->bitVectSet((UINT)(codeAddr - codeBegp))); codeAddr += sizeof(__int8); if (!handled && preciseScan) { // Push something unknown to the stack since we couldn't find anything useful for inlining pushedStack.PushUnknown(); } handled = false; DECODE_OPCODE: if ((unsigned)opcode >= CEE_COUNT) { BADCODE3("Illegal opcode", ": %02X", (int)opcode); } if ((opcode >= CEE_LDARG_0 && opcode <= CEE_STLOC_S) || (opcode >= CEE_LDARG && opcode <= CEE_STLOC)) { opts.lvRefCount++; } if (makeInlineObservations && (opcode >= CEE_LDNULL) && (opcode <= CEE_LDC_R8)) { // LDTOKEN and LDSTR are handled below pushedStack.PushConstant(); handled = true; } unsigned sz = opcodeSizes[opcode]; switch (opcode) { case CEE_PREFIX1: { if (codeAddr >= codeEndp) { goto TOO_FAR; } opcode = (OPCODE)(256 + getU1LittleEndian(codeAddr)); codeAddr += sizeof(__int8); goto DECODE_OPCODE; } case CEE_PREFIX2: case CEE_PREFIX3: case CEE_PREFIX4: case CEE_PREFIX5: case CEE_PREFIX6: case CEE_PREFIX7: case CEE_PREFIXREF: { BADCODE3("Illegal opcode", ": %02X", (int)opcode); } case CEE_SIZEOF: case CEE_LDTOKEN: case CEE_LDSTR: { if (preciseScan) { pushedStack.PushConstant(); handled = true; } break; } case CEE_DUP: { if (preciseScan) { pushedStack.Push(pushedStack.Top()); handled = true; } break; } case CEE_THROW: { if (makeInlineObservations) { compInlineResult->Note(InlineObservation::CALLEE_THROW_BLOCK); } break; } case CEE_BOX: { if (makeInlineObservations) { int toSkip = impBoxPatternMatch(nullptr, codeAddr + sz, codeEndp, true); if (toSkip > 0) { // toSkip > 0 means we most likely will hit a pattern (e.g. box+isinst+brtrue) that // will be folded into a const if (preciseScan) { codeAddr += toSkip; } } } break; } case CEE_CASTCLASS: case CEE_ISINST: { if (makeInlineObservations) { FgStack::FgSlot slot = pushedStack.Top(); if (FgStack::IsConstantOrConstArg(slot, impInlineInfo) || FgStack::IsExactArgument(slot, impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_EXPR_UN); handled = true; // and keep argument in the pushedStack } else if (FgStack::IsArgument(slot)) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CAST); handled = true; // and keep argument in the pushedStack } } break; } case CEE_CALL: case CEE_CALLVIRT: { // There has to be code after the call, otherwise the inlinee is unverifiable. 
if (isInlining) { noway_assert(codeAddr < codeEndp - sz); } if (!makeInlineObservations) { break; } CORINFO_METHOD_HANDLE methodHnd = nullptr; bool isIntrinsic = false; NamedIntrinsic ni = NI_Illegal; if (resolveTokens) { impResolveToken(codeAddr, &resolvedToken, CORINFO_TOKENKIND_Method); methodHnd = resolvedToken.hMethod; isIntrinsic = eeIsIntrinsic(methodHnd); } if (isIntrinsic) { ni = lookupNamedIntrinsic(methodHnd); bool foldableIntrinsic = false; if (IsMathIntrinsic(ni)) { // Most Math(F) intrinsics have single arguments foldableIntrinsic = FgStack::IsConstantOrConstArg(pushedStack.Top(), impInlineInfo); } else { switch (ni) { // These are most likely foldable without arguments case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: case NI_System_Enum_HasFlag: case NI_System_GC_KeepAlive: { pushedStack.PushUnknown(); foldableIntrinsic = true; break; } case NI_System_Span_get_Item: case NI_System_ReadOnlySpan_get_Item: { if (FgStack::IsArgument(pushedStack.Top(0)) || FgStack::IsArgument(pushedStack.Top(1))) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK); } break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant: if (FgStack::IsConstArgument(pushedStack.Top(), impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLEE_CONST_ARG_FEEDS_ISCONST); } else { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_ISCONST); } // RuntimeHelpers.IsKnownConstant is always folded into a const pushedStack.PushConstant(); foldableIntrinsic = true; break; // These are foldable if the first argument is a constant case NI_System_Type_get_IsValueType: case NI_System_Type_GetTypeFromHandle: case NI_System_String_get_Length: case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness: case NI_System_Numerics_BitOperations_PopCount: #if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS) case NI_Vector128_Create: case NI_Vector256_Create: #elif defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS) case NI_Vector64_Create: case NI_Vector128_Create: #endif { // Top() in order to keep it as is in case of foldableIntrinsic if (FgStack::IsConstantOrConstArg(pushedStack.Top(), impInlineInfo)) { foldableIntrinsic = true; } break; } // These are foldable if two arguments are constants case NI_System_Type_op_Equality: case NI_System_Type_op_Inequality: case NI_System_String_get_Chars: case NI_System_Type_IsAssignableTo: case NI_System_Type_IsAssignableFrom: { if (FgStack::IsConstantOrConstArg(pushedStack.Top(0), impInlineInfo) && FgStack::IsConstantOrConstArg(pushedStack.Top(1), impInlineInfo)) { foldableIntrinsic = true; pushedStack.PushConstant(); } break; } case NI_IsSupported_True: case NI_IsSupported_False: { foldableIntrinsic = true; pushedStack.PushConstant(); break; } #if defined(TARGET_XARCH) && defined(FEATURE_HW_INTRINSICS) case NI_Vector128_get_Count: case NI_Vector256_get_Count: foldableIntrinsic = true; pushedStack.PushConstant(); // TODO: check if it's a loop condition - we unroll such loops. 
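    // Vector128<T>.Count / Vector256<T>.Count are JIT-time constants for a given T,
    // so model them as constants on the abstract stack; a later compare or branch
    // against them can then be recognized as foldable.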
break; case NI_Vector256_get_Zero: case NI_Vector256_get_AllBitsSet: foldableIntrinsic = true; pushedStack.PushUnknown(); break; #elif defined(TARGET_ARM64) && defined(FEATURE_HW_INTRINSICS) case NI_Vector64_get_Count: case NI_Vector128_get_Count: foldableIntrinsic = true; pushedStack.PushConstant(); break; case NI_Vector128_get_Zero: case NI_Vector128_get_AllBitsSet: foldableIntrinsic = true; pushedStack.PushUnknown(); break; #endif default: { break; } } } if (foldableIntrinsic) { compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_INTRINSIC); handled = true; } else if (ni != NI_Illegal) { // Otherwise note "intrinsic" (most likely will be lowered as single instructions) // except Math where only a few intrinsics won't end up as normal calls if (!IsMathIntrinsic(ni) || IsTargetIntrinsic(ni)) { compInlineResult->Note(InlineObservation::CALLEE_INTRINSIC); } } } if ((codeAddr < codeEndp - sz) && (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET) { // If the method has a call followed by a ret, assume that // it is a wrapper method. compInlineResult->Note(InlineObservation::CALLEE_LOOKS_LIKE_WRAPPER); } if (!isIntrinsic && !handled && FgStack::IsArgument(pushedStack.Top())) { // Optimistically assume that "call(arg)" returns something arg-dependent. // However, we don't know how many args it expects and its return type. handled = true; } } break; case CEE_LDIND_I1: case CEE_LDIND_U1: case CEE_LDIND_I2: case CEE_LDIND_U2: case CEE_LDIND_I4: case CEE_LDIND_U4: case CEE_LDIND_I8: case CEE_LDIND_I: case CEE_LDIND_R4: case CEE_LDIND_R8: case CEE_LDIND_REF: { if (FgStack::IsArgument(pushedStack.Top())) { handled = true; } break; } // Unary operators: case CEE_CONV_I: case CEE_CONV_U: case CEE_CONV_I1: case CEE_CONV_I2: case CEE_CONV_I4: case CEE_CONV_I8: case CEE_CONV_R4: case CEE_CONV_R8: case CEE_CONV_U4: case CEE_CONV_U8: case CEE_CONV_U2: case CEE_CONV_U1: case CEE_CONV_R_UN: case CEE_CONV_OVF_I: case CEE_CONV_OVF_U: case CEE_CONV_OVF_I1: case CEE_CONV_OVF_U1: case CEE_CONV_OVF_I2: case CEE_CONV_OVF_U2: case CEE_CONV_OVF_I4: case CEE_CONV_OVF_U4: case CEE_CONV_OVF_I8: case CEE_CONV_OVF_U8: case CEE_CONV_OVF_I_UN: case CEE_CONV_OVF_U_UN: case CEE_CONV_OVF_I1_UN: case CEE_CONV_OVF_I2_UN: case CEE_CONV_OVF_I4_UN: case CEE_CONV_OVF_I8_UN: case CEE_CONV_OVF_U1_UN: case CEE_CONV_OVF_U2_UN: case CEE_CONV_OVF_U4_UN: case CEE_CONV_OVF_U8_UN: case CEE_NOT: case CEE_NEG: { if (makeInlineObservations) { FgStack::FgSlot arg = pushedStack.Top(); if (FgStack::IsConstArgument(arg, impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_EXPR_UN); handled = true; } else if (FgStack::IsArgument(arg) || FgStack::IsConstant(arg)) { handled = true; } } break; } // Binary operators: case CEE_ADD: case CEE_SUB: case CEE_MUL: case CEE_DIV: case CEE_DIV_UN: case CEE_REM: case CEE_REM_UN: case CEE_AND: case CEE_OR: case CEE_XOR: case CEE_SHL: case CEE_SHR: case CEE_SHR_UN: case CEE_ADD_OVF: case CEE_ADD_OVF_UN: case CEE_MUL_OVF: case CEE_MUL_OVF_UN: case CEE_SUB_OVF: case CEE_SUB_OVF_UN: case CEE_CEQ: case CEE_CGT: case CEE_CGT_UN: case CEE_CLT: case CEE_CLT_UN: { if (!makeInlineObservations) { break; } if (!preciseScan) { switch (opcode) { case CEE_CEQ: case CEE_CGT: case CEE_CGT_UN: case CEE_CLT: case CEE_CLT_UN: fgObserveInlineConstants(opcode, pushedStack, isInlining); break; default: break; } } else { FgStack::FgSlot arg0 = pushedStack.Top(1); FgStack::FgSlot arg1 = pushedStack.Top(0); // Const op ConstArg -> ConstArg if (FgStack::IsConstant(arg0) && FgStack::IsConstArgument(arg1, 
impInlineInfo)) { // keep stack unchanged handled = true; compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_EXPR); } // ConstArg op Const -> ConstArg // ConstArg op ConstArg -> ConstArg else if (FgStack::IsConstArgument(arg0, impInlineInfo) && FgStack::IsConstantOrConstArg(arg1, impInlineInfo)) { if (FgStack::IsConstant(arg1)) { pushedStack.Push(arg0); } handled = true; compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_EXPR); } // Const op Const -> Const else if (FgStack::IsConstant(arg0) && FgStack::IsConstant(arg1)) { // both are constants, but we're mostly interested in cases where a const arg leads to // a foldable expression. handled = true; } // Arg op ConstArg // Arg op Const else if (FgStack::IsArgument(arg0) && FgStack::IsConstantOrConstArg(arg1, impInlineInfo)) { // "Arg op CNS" --> keep arg0 in the stack for the next ops pushedStack.Push(arg0); handled = true; compInlineResult->Note(InlineObservation::CALLEE_BINARY_EXRP_WITH_CNS); } // ConstArg op Arg // Const op Arg else if (FgStack::IsArgument(arg1) && FgStack::IsConstantOrConstArg(arg0, impInlineInfo)) { // "CNS op ARG" --> keep arg1 in the stack for the next ops handled = true; compInlineResult->Note(InlineObservation::CALLEE_BINARY_EXRP_WITH_CNS); } // X / ConstArg // X % ConstArg if (FgStack::IsConstArgument(arg1, impInlineInfo)) { if ((opcode == CEE_DIV) || (opcode == CEE_DIV_UN) || (opcode == CEE_REM) || (opcode == CEE_REM_UN)) { compInlineResult->Note(InlineObservation::CALLSITE_DIV_BY_CNS); } pushedStack.Push(arg0); handled = true; } } break; } // Jumps case CEE_LEAVE: case CEE_LEAVE_S: case CEE_BR: case CEE_BR_S: case CEE_BRFALSE: case CEE_BRFALSE_S: case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BEQ: case CEE_BEQ_S: case CEE_BGE: case CEE_BGE_S: case CEE_BGE_UN: case CEE_BGE_UN_S: case CEE_BGT: case CEE_BGT_S: case CEE_BGT_UN: case CEE_BGT_UN_S: case CEE_BLE: case CEE_BLE_S: case CEE_BLE_UN: case CEE_BLE_UN_S: case CEE_BLT: case CEE_BLT_S: case CEE_BLT_UN: case CEE_BLT_UN_S: case CEE_BNE_UN: case CEE_BNE_UN_S: { if (codeAddr > codeEndp - sz) { goto TOO_FAR; } // Compute jump target address signed jmpDist = (sz == 1) ? 
getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0 && (opcode == CEE_LEAVE || opcode == CEE_LEAVE_S || opcode == CEE_BR || opcode == CEE_BR_S)) { break; /* NOP */ } unsigned jmpAddr = (IL_OFFSET)(codeAddr - codeBegp) + sz + jmpDist; // Make sure target is reasonable if (jmpAddr >= codeSize) { BADCODE3("code jumps to outer space", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } if (makeInlineObservations && (jmpDist < 0)) { compInlineResult->Note(InlineObservation::CALLEE_BACKWARD_JUMP); } // Mark the jump target jumpTarget->bitVectSet(jmpAddr); // See if jump might be sensitive to inlining if (!preciseScan && makeInlineObservations && (opcode != CEE_BR_S) && (opcode != CEE_BR)) { fgObserveInlineConstants(opcode, pushedStack, isInlining); } else if (preciseScan && makeInlineObservations) { switch (opcode) { // Binary case CEE_BEQ: case CEE_BGE: case CEE_BGT: case CEE_BLE: case CEE_BLT: case CEE_BNE_UN: case CEE_BGE_UN: case CEE_BGT_UN: case CEE_BLE_UN: case CEE_BLT_UN: case CEE_BEQ_S: case CEE_BGE_S: case CEE_BGT_S: case CEE_BLE_S: case CEE_BLT_S: case CEE_BNE_UN_S: case CEE_BGE_UN_S: case CEE_BGT_UN_S: case CEE_BLE_UN_S: case CEE_BLT_UN_S: { FgStack::FgSlot op1 = pushedStack.Top(1); FgStack::FgSlot op2 = pushedStack.Top(0); if (FgStack::IsConstantOrConstArg(op1, impInlineInfo) && FgStack::IsConstantOrConstArg(op2, impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_BRANCH); } if (FgStack::IsConstArgument(op1, impInlineInfo) || FgStack::IsConstArgument(op2, impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST); } if ((FgStack::IsArgument(op1) && FgStack::IsArrayLen(op2)) || (FgStack::IsArgument(op2) && FgStack::IsArrayLen(op1))) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK); } else if ((FgStack::IsArgument(op1) && FgStack::IsConstantOrConstArg(op2, impInlineInfo)) || (FgStack::IsArgument(op2) && FgStack::IsConstantOrConstArg(op1, impInlineInfo))) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST); } else if (FgStack::IsArgument(op1) || FgStack::IsArgument(op2)) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_TEST); } else if (FgStack::IsConstant(op1) || FgStack::IsConstant(op2)) { compInlineResult->Note(InlineObservation::CALLEE_BINARY_EXRP_WITH_CNS); } break; } // Unary case CEE_BRFALSE_S: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRTRUE: { if (FgStack::IsConstantOrConstArg(pushedStack.Top(), impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_BRANCH); } else if (FgStack::IsArgument(pushedStack.Top())) { // E.g. 
brtrue is basically "if (X == 0)" compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST); } break; } default: break; } } } break; case CEE_LDFLDA: case CEE_LDFLD: case CEE_STFLD: { if (FgStack::IsArgument(pushedStack.Top())) { compInlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT_FIELD_ACCESS); handled = true; // keep argument on top of the stack } break; } case CEE_LDELEM_I1: case CEE_LDELEM_U1: case CEE_LDELEM_I2: case CEE_LDELEM_U2: case CEE_LDELEM_I4: case CEE_LDELEM_U4: case CEE_LDELEM_I8: case CEE_LDELEM_I: case CEE_LDELEM_R4: case CEE_LDELEM_R8: case CEE_LDELEM_REF: case CEE_STELEM_I: case CEE_STELEM_I1: case CEE_STELEM_I2: case CEE_STELEM_I4: case CEE_STELEM_I8: case CEE_STELEM_R4: case CEE_STELEM_R8: case CEE_STELEM_REF: case CEE_LDELEM: case CEE_STELEM: { if (!preciseScan) { break; } if (FgStack::IsArgument(pushedStack.Top()) || FgStack::IsArgument(pushedStack.Top(1))) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK); } break; } case CEE_SWITCH: { if (makeInlineObservations) { compInlineResult->Note(InlineObservation::CALLEE_HAS_SWITCH); if (FgStack::IsConstantOrConstArg(pushedStack.Top(), impInlineInfo)) { compInlineResult->Note(InlineObservation::CALLSITE_FOLDABLE_SWITCH); } // Fail fast, if we're inlining and can't handle this. if (isInlining && compInlineResult->IsFailure()) { return; } } // Make sure we don't go past the end reading the number of cases if (codeAddr > codeEndp - sizeof(DWORD)) { goto TOO_FAR; } // Read the number of cases unsigned jmpCnt = getU4LittleEndian(codeAddr); codeAddr += sizeof(DWORD); if (jmpCnt > codeSize / sizeof(DWORD)) { goto TOO_FAR; } // Find the end of the switch table unsigned jmpBase = (unsigned)((codeAddr - codeBegp) + jmpCnt * sizeof(DWORD)); // Make sure there is more code after the switch if (jmpBase >= codeSize) { goto TOO_FAR; } // jmpBase is also the target of the default case, so mark it jumpTarget->bitVectSet(jmpBase); // Process table entries while (jmpCnt > 0) { unsigned jmpAddr = jmpBase + getI4LittleEndian(codeAddr); codeAddr += 4; if (jmpAddr >= codeSize) { BADCODE3("jump target out of range", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } jumpTarget->bitVectSet(jmpAddr); jmpCnt--; } // We've advanced past all the bytes in this instruction sz = 0; } break; case CEE_UNALIGNED: { noway_assert(sz == sizeof(__int8)); prefixFlags |= PREFIX_UNALIGNED; codeAddr += sizeof(__int8); impValidateMemoryAccessOpcode(codeAddr, codeEndp, false); handled = true; goto OBSERVE_OPCODE; } case CEE_CONSTRAINED: { noway_assert(sz == sizeof(unsigned)); prefixFlags |= PREFIX_CONSTRAINED; codeAddr += sizeof(unsigned); { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN) { BADCODE("constrained. has to be followed by callvirt, call or ldftn"); } } handled = true; goto OBSERVE_OPCODE; } case CEE_READONLY: { noway_assert(sz == 0); prefixFlags |= PREFIX_READONLY; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if ((actualOpcode != CEE_LDELEMA) && !impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("readonly. 
has to be followed by ldelema or call"); } } handled = true; goto OBSERVE_OPCODE; } case CEE_VOLATILE: { noway_assert(sz == 0); prefixFlags |= PREFIX_VOLATILE; impValidateMemoryAccessOpcode(codeAddr, codeEndp, true); handled = true; goto OBSERVE_OPCODE; } case CEE_TAILCALL: { noway_assert(sz == 0); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("tailcall. has to be followed by call, callvirt or calli"); } } handled = true; goto OBSERVE_OPCODE; } case CEE_STARG: case CEE_STARG_S: { noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD)); if (codeAddr > codeEndp - sz) { goto TOO_FAR; } varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); if (isInlining) { if (varNum < impInlineInfo->argCnt) { impInlineInfo->inlArgInfo[varNum].argHasStargOp = true; } } else { // account for possible hidden param varNum = compMapILargNum(varNum); // This check is only intended to prevent an AV. Bad varNum values will later // be handled properly by the verifier. if (varNum < lvaTableCnt) { // In non-inline cases, note written-to arguments. lvaTable[varNum].lvHasILStoreOp = 1; } } } break; case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: varNum = (opcode - CEE_STLOC_0); goto STLOC; case CEE_STLOC: case CEE_STLOC_S: { noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD)); if (codeAddr > codeEndp - sz) { goto TOO_FAR; } varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); STLOC: if (isInlining) { InlLclVarInfo& lclInfo = impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt]; if (lclInfo.lclHasStlocOp) { lclInfo.lclHasMultipleStlocOp = 1; } else { lclInfo.lclHasStlocOp = 1; } } else { varNum += info.compArgsCount; // This check is only intended to prevent an AV. Bad varNum values will later // be handled properly by the verifier. if (varNum < lvaTableCnt) { // In non-inline cases, note written-to locals. if (lvaTable[varNum].lvHasILStoreOp) { lvaTable[varNum].lvHasMultipleILStoreOp = 1; } else { lvaTable[varNum].lvHasILStoreOp = 1; } } } } break; case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: // if (preciseScan && makeInlineObservations && (prevOpcode == (CEE_STLOC_3 - (CEE_LDLOC_3 - opcode)))) { // Fold stloc+ldloc pushedStack.Push(pushedStack.Top(1)); // throw away SLOT_UNKNOWN inserted by STLOC handled = true; } break; case CEE_LDARGA: case CEE_LDARGA_S: case CEE_LDLOCA: case CEE_LDLOCA_S: { // Handle address-taken args or locals noway_assert(sz == sizeof(BYTE) || sz == sizeof(WORD)); if (codeAddr > codeEndp - sz) { goto TOO_FAR; } varNum = (sz == sizeof(BYTE)) ? 
getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); if (isInlining) { if (opcode == CEE_LDLOCA || opcode == CEE_LDLOCA_S) { varType = impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt].lclTypeInfo; ti = impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt].lclVerTypeInfo; impInlineInfo->lclVarInfo[varNum + impInlineInfo->argCnt].lclHasLdlocaOp = true; } else { noway_assert(opcode == CEE_LDARGA || opcode == CEE_LDARGA_S); varType = impInlineInfo->lclVarInfo[varNum].lclTypeInfo; ti = impInlineInfo->lclVarInfo[varNum].lclVerTypeInfo; impInlineInfo->inlArgInfo[varNum].argHasLdargaOp = true; pushedStack.PushArgument(varNum); handled = true; } } else { if (opcode == CEE_LDLOCA || opcode == CEE_LDLOCA_S) { if (varNum >= info.compMethodInfo->locals.numArgs) { BADCODE("bad local number"); } varNum += info.compArgsCount; } else { noway_assert(opcode == CEE_LDARGA || opcode == CEE_LDARGA_S); if (varNum >= info.compILargsCount) { BADCODE("bad argument number"); } varNum = compMapILargNum(varNum); // account for possible hidden param } varType = (var_types)lvaTable[varNum].lvType; ti = lvaTable[varNum].lvVerTypeInfo; // Determine if the next instruction will consume // the address. If so we won't mark this var as // address taken. // // We will put structs on the stack and changing // the addrTaken of a local requires an extra pass // in the morpher so we won't apply this // optimization to structs. // // Debug code spills for every IL instruction, and // therefore it will split statements, so we will // need the address. Note that this optimization // is based in that we know what trees we will // generate for this ldfld, and we require that we // won't need the address of this local at all const bool notStruct = !varTypeIsStruct(lvaGetDesc(varNum)); const bool notLastInstr = (codeAddr < codeEndp - sz); const bool notDebugCode = !opts.compDbgCode; if (notStruct && notLastInstr && notDebugCode && impILConsumesAddr(codeAddr + sz)) { // We can skip the addrtaken, as next IL instruction consumes // the address. } else { lvaTable[varNum].lvHasLdAddrOp = 1; if (!info.compIsStatic && (varNum == 0)) { // Addr taken on "this" pointer is significant, // go ahead to mark it as permanently addr-exposed here. // This may be conservative, but probably not very. lvaSetVarAddrExposed(0 DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE)); } } } // isInlining typeIsNormed = ti.IsValueClass() && !varTypeIsStruct(varType); } break; case CEE_JMP: retBlocks++; #if !defined(TARGET_X86) && !defined(TARGET_ARM) if (!isInlining) { // We transform this into a set of ldarg's + tail call and // thus may push more onto the stack than originally thought. // This doesn't interfere with verification because CEE_JMP // is never verifiable, and there's nothing unsafe you can // do with a an IL stack overflow if the JIT is expecting it. info.compMaxStack = max(info.compMaxStack, info.compILargsCount); break; } #endif // !TARGET_X86 && !TARGET_ARM // If we are inlining, we need to fail for a CEE_JMP opcode, just like // the list of other opcodes (for all platforms). FALLTHROUGH; case CEE_MKREFANY: case CEE_RETHROW: if (makeInlineObservations) { // Arguably this should be NoteFatal, but the legacy behavior is // to ignore this for the prejit root. compInlineResult->Note(InlineObservation::CALLEE_UNSUPPORTED_OPCODE); // Fail fast if we're inlining... 
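    // (For an inline candidate, CALLEE_UNSUPPORTED_OPCODE is expected to mark the
    //  result as a failure immediately; the assert below double-checks that.)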
if (isInlining) { assert(compInlineResult->IsFailure()); return; } } break; case CEE_LOCALLOC: compLocallocSeen = true; // We now allow localloc callees to become candidates in some cases. if (makeInlineObservations) { compInlineResult->Note(InlineObservation::CALLEE_HAS_LOCALLOC); if (isInlining && compInlineResult->IsFailure()) { return; } } break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: if (makeInlineObservations) { pushedStack.PushArgument(opcode - CEE_LDARG_0); handled = true; } break; case CEE_LDARG_S: case CEE_LDARG: { if (codeAddr > codeEndp - sz) { goto TOO_FAR; } varNum = (sz == sizeof(BYTE)) ? getU1LittleEndian(codeAddr) : getU2LittleEndian(codeAddr); if (makeInlineObservations) { pushedStack.PushArgument(varNum); handled = true; } } break; case CEE_LDLEN: if (makeInlineObservations) { pushedStack.PushArrayLen(); handled = true; } break; case CEE_RET: retBlocks++; break; default: break; } // Skip any remaining operands this opcode may have codeAddr += sz; // Clear any prefix flags that may have been set prefixFlags = 0; // Increment the number of observed instructions opts.instrCount++; OBSERVE_OPCODE: // Note the opcode we just saw if (makeInlineObservations) { InlineObservation obs = typeIsNormed ? InlineObservation::CALLEE_OPCODE_NORMED : InlineObservation::CALLEE_OPCODE; compInlineResult->NoteInt(obs, opcode); } typeIsNormed = false; } if (codeAddr != codeEndp) { TOO_FAR: BADCODE3("Code ends in the middle of an opcode, or there is a branch past the end of the method", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } INDEBUG(compInlineContext->SetILInstsSet(ilInstsSet)); if (makeInlineObservations) { compInlineResult->Note(InlineObservation::CALLEE_END_OPCODE_SCAN); // If there are no return blocks we know it does not return, however if there // return blocks we don't know it returns as it may be counting unreachable code. // However we will still make the CALLEE_DOES_NOT_RETURN observation. compInlineResult->NoteBool(InlineObservation::CALLEE_DOES_NOT_RETURN, retBlocks == 0); if (retBlocks == 0 && isInlining) { // Mark the call node as "no return" as it can impact caller's code quality. impInlineInfo->iciCall->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; // Mark root method as containing a noreturn call. impInlineRoot()->setMethodHasNoReturnCalls(); } // If the inline is viable and discretionary, do the // profitability screening. if (compInlineResult->IsDiscretionaryCandidate()) { // Make some callsite specific observations that will feed // into the profitability model. impMakeDiscretionaryInlineObservations(impInlineInfo, compInlineResult); // None of those observations should have changed the // inline's viability. assert(compInlineResult->IsCandidate()); if (isInlining) { // Assess profitability... CORINFO_METHOD_INFO* methodInfo = &impInlineInfo->inlineCandidateInfo->methInfo; compInlineResult->DetermineProfitability(methodInfo); if (compInlineResult->IsFailure()) { impInlineRoot()->m_inlineStrategy->NoteUnprofitable(); JITDUMP("\n\nInline expansion aborted, inline not profitable\n"); return; } else { // The inline is still viable. assert(compInlineResult->IsCandidate()); } } else { // Prejit root case. Profitability assessment for this // is done over in compCompileHelper. } } } // None of the local vars in the inlinee should have address taken or been written to. // Therefore we should NOT need to enter this "if" statement. 
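    // (fgAdjustForAddressExposedOrWrittenThis below only deals with the implicit 'this'
    //  argument of the root method; ordinary root-method locals are instead marked as
    //  single-def further down, based on the lvHasMultipleILStoreOp / lvHasLdAddrOp bits
    //  recorded during the scan.)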
if (!isInlining && !info.compIsStatic) { fgAdjustForAddressExposedOrWrittenThis(); } // Now that we've seen the IL, set lvSingleDef for root method // locals. // // We could also do this for root method arguments but single-def // arguments are set by the caller and so we don't know anything // about the possible values or types. // // For inlinees we do this over in impInlineFetchLocal and // impInlineFetchArg (here args are included as we somtimes get // new information about the types of inlinee args). if (!isInlining) { const unsigned firstLcl = info.compArgsCount; const unsigned lastLcl = firstLcl + info.compMethodInfo->locals.numArgs; for (unsigned lclNum = firstLcl; lclNum < lastLcl; lclNum++) { LclVarDsc* lclDsc = lvaGetDesc(lclNum); assert(lclDsc->lvSingleDef == 0); // could restrict this to TYP_REF lclDsc->lvSingleDef = !lclDsc->lvHasMultipleILStoreOp && !lclDsc->lvHasLdAddrOp; if (lclDsc->lvSingleDef) { JITDUMP("Marked V%02u as a single def local\n", lclNum); } } } } //------------------------------------------------------------------------ // fgAdjustForAddressExposedOrWrittenThis: update var table for cases // where the this pointer value can change. // // Notes: // Modifies lvaArg0Var to refer to a temp if the value of 'this' can // change. The original this (info.compThisArg) then remains // unmodified in the method. fgAddInternal is reponsible for // adding the code to copy the initial this into the temp. void Compiler::fgAdjustForAddressExposedOrWrittenThis() { LclVarDsc* thisVarDsc = lvaGetDesc(info.compThisArg); // Optionally enable adjustment during stress. if (compStressCompile(STRESS_GENERIC_VARN, 15)) { thisVarDsc->lvHasILStoreOp = true; } // If this is exposed or written to, create a temp for the modifiable this if (thisVarDsc->IsAddressExposed() || thisVarDsc->lvHasILStoreOp) { // If there is a "ldarga 0" or "starg 0", grab and use the temp. lvaArg0Var = lvaGrabTemp(false DEBUGARG("Address-exposed, or written this pointer")); noway_assert(lvaArg0Var > (unsigned)info.compThisArg); LclVarDsc* arg0varDsc = lvaGetDesc(lvaArg0Var); arg0varDsc->lvType = thisVarDsc->TypeGet(); arg0varDsc->SetAddressExposed(thisVarDsc->IsAddressExposed() DEBUGARG(thisVarDsc->GetAddrExposedReason())); arg0varDsc->lvDoNotEnregister = thisVarDsc->lvDoNotEnregister; #ifdef DEBUG arg0varDsc->SetDoNotEnregReason(thisVarDsc->GetDoNotEnregReason()); #endif arg0varDsc->lvHasILStoreOp = thisVarDsc->lvHasILStoreOp; arg0varDsc->lvVerTypeInfo = thisVarDsc->lvVerTypeInfo; // Clear the TI_FLAG_THIS_PTR in the original 'this' pointer. noway_assert(arg0varDsc->lvVerTypeInfo.IsThisPtr()); thisVarDsc->lvVerTypeInfo.ClearThisPtr(); // Note that here we don't clear `m_doNotEnregReason` and it stays // `doNotEnreg` with `AddrExposed` reason. thisVarDsc->CleanAddressExposed(); thisVarDsc->lvHasILStoreOp = false; } } //------------------------------------------------------------------------ // fgObserveInlineConstants: look for operations that might get optimized // if this method were to be inlined, and report these to the inliner. // // Arguments: // opcode -- MSIL opcode under consideration // stack -- abstract stack model at this point in the IL // isInlining -- true if we're inlining (vs compiling a prejit root) // // Notes: // Currently only invoked on compare and branch opcodes. // // If we're inlining we also look at the argument values supplied by // the caller at this call site. // // The crude stack model may overestimate stack depth. 
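//  As a rough illustration (non-precise scans only), for IL like
//      ldarg.0
//      ldc.i4.8
//      bge.s  SOME_LABEL          // label name is illustrative only
//  the model sees { constant, argument } at the branch, so CALLEE_ARG_FEEDS_CONSTANT_TEST
//  is noted, with CALLSITE_CONSTANT_ARG_FEEDS_TEST added when the caller happens to pass
//  an invariant value for that argument.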
void Compiler::fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, bool isInlining) { // We should be able to record inline observations. assert(compInlineResult != nullptr); // The stack only has to be 1 deep for BRTRUE/FALSE bool lookForBranchCases = stack.IsStackAtLeastOneDeep(); if (lookForBranchCases) { if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S || opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) { FgStack::FgSlot slot0 = stack.GetSlot0(); if (FgStack::IsArgument(slot0)) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST); if (isInlining) { // Check for the double whammy of an incoming constant argument // feeding a constant test. unsigned varNum = FgStack::SlotTypeToArgNum(slot0); if (impInlineInfo->inlArgInfo[varNum].argIsInvariant) { compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST); } } } return; } } // Remaining cases require at least two things on the stack. if (!stack.IsStackTwoDeep()) { return; } FgStack::FgSlot slot0 = stack.GetSlot0(); FgStack::FgSlot slot1 = stack.GetSlot1(); // Arg feeds constant test if ((FgStack::IsConstant(slot0) && FgStack::IsArgument(slot1)) || (FgStack::IsConstant(slot1) && FgStack::IsArgument(slot0))) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST); } // Arg feeds range check if ((FgStack::IsArrayLen(slot0) && FgStack::IsArgument(slot1)) || (FgStack::IsArrayLen(slot1) && FgStack::IsArgument(slot0))) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK); } // Check for an incoming arg that's a constant if (isInlining) { if (FgStack::IsArgument(slot0)) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_TEST); unsigned varNum = FgStack::SlotTypeToArgNum(slot0); if (impInlineInfo->inlArgInfo[varNum].argIsInvariant) { compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST); } } if (FgStack::IsArgument(slot1)) { compInlineResult->Note(InlineObservation::CALLEE_ARG_FEEDS_TEST); unsigned varNum = FgStack::SlotTypeToArgNum(slot1); if (impInlineInfo->inlArgInfo[varNum].argIsInvariant) { compInlineResult->Note(InlineObservation::CALLSITE_CONSTANT_ARG_FEEDS_TEST); } } } } #ifdef _PREFAST_ #pragma warning(pop) #endif //------------------------------------------------------------------------ // fgMarkBackwardJump: mark blocks indicating there is a jump backwards in // IL, from a higher to lower IL offset. 
// // Arguments: // targetBlock -- target of the jump // sourceBlock -- source of the jump void Compiler::fgMarkBackwardJump(BasicBlock* targetBlock, BasicBlock* sourceBlock) { noway_assert(targetBlock->bbNum <= sourceBlock->bbNum); for (BasicBlock* const block : Blocks(targetBlock, sourceBlock)) { if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && (block->bbJumpKind != BBJ_RETURN)) { block->bbFlags |= BBF_BACKWARD_JUMP; compHasBackwardJump = true; } } sourceBlock->bbFlags |= BBF_BACKWARD_JUMP_SOURCE; targetBlock->bbFlags |= BBF_BACKWARD_JUMP_TARGET; } /***************************************************************************** * * Finally link up the bbJumpDest of the blocks together */ void Compiler::fgLinkBasicBlocks() { /* Create the basic block lookup tables */ fgInitBBLookup(); /* First block is always reachable */ fgFirstBB->bbRefs = 1; /* Walk all the basic blocks, filling in the target addresses */ for (BasicBlock* const curBBdesc : Blocks()) { switch (curBBdesc->bbJumpKind) { case BBJ_COND: case BBJ_ALWAYS: case BBJ_LEAVE: curBBdesc->bbJumpDest = fgLookupBB(curBBdesc->bbJumpOffs); curBBdesc->bbJumpDest->bbRefs++; if (curBBdesc->bbJumpDest->bbNum <= curBBdesc->bbNum) { fgMarkBackwardJump(curBBdesc->bbJumpDest, curBBdesc); } /* Is the next block reachable? */ if (curBBdesc->KindIs(BBJ_ALWAYS, BBJ_LEAVE)) { break; } if (!curBBdesc->bbNext) { BADCODE("Fall thru the end of a method"); } // Fall through, the next block is also reachable FALLTHROUGH; case BBJ_NONE: curBBdesc->bbNext->bbRefs++; break; case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_THROW: case BBJ_RETURN: break; case BBJ_SWITCH: unsigned jumpCnt; jumpCnt = curBBdesc->bbJumpSwt->bbsCount; BasicBlock** jumpPtr; jumpPtr = curBBdesc->bbJumpSwt->bbsDstTab; do { *jumpPtr = fgLookupBB((unsigned)*(size_t*)jumpPtr); (*jumpPtr)->bbRefs++; if ((*jumpPtr)->bbNum <= curBBdesc->bbNum) { fgMarkBackwardJump(*jumpPtr, curBBdesc); } } while (++jumpPtr, --jumpCnt); /* Default case of CEE_SWITCH (next block), is at end of jumpTab[] */ noway_assert(*(jumpPtr - 1) == curBBdesc->bbNext); break; case BBJ_CALLFINALLY: // BBJ_CALLFINALLY and BBJ_EHCATCHRET don't appear until later case BBJ_EHCATCHRET: default: noway_assert(!"Unexpected bbJumpKind"); break; } } } //------------------------------------------------------------------------ // fgMakeBasicBlocks: walk the IL creating basic blocks, and look for // operations that might get optimized if this method were to be inlined. // // Arguments: // codeAddr -- starting address of the method's IL stream // codeSize -- length of the IL stream // jumpTarget -- [in] bit vector of jump targets found by fgFindJumpTargets // // Returns: // number of return blocks (BBJ_RETURN) in the method (may be zero) // // Notes: // Invoked for prejited and jitted methods, and for all inlinees unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, FixedBitVect* jumpTarget) { unsigned retBlocks = 0; const BYTE* codeBegp = codeAddr; const BYTE* codeEndp = codeAddr + codeSize; bool tailCall = false; unsigned curBBoffs = 0; BasicBlock* curBBdesc; // Keep track of where we are in the scope lists, as we will also // create blocks at scope boundaries. 
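    // (Debuggable code only: under compDbgCode every local-variable scope boundary also
    //  forces a block split, so that scope starts and ends line up with block boundaries.)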
if (opts.compDbgCode && (info.compVarScopesCount > 0)) { compResetScopeLists(); // Ignore scopes beginning at offset 0 while (compGetNextEnterScope(0)) { /* do nothing */ } while (compGetNextExitScope(0)) { /* do nothing */ } } do { unsigned jmpAddr = DUMMY_INIT(BAD_IL_OFFSET); BasicBlockFlags bbFlags = BBF_EMPTY; BBswtDesc* swtDsc = nullptr; unsigned nxtBBoffs; OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); BBjumpKinds jmpKind = BBJ_NONE; DECODE_OPCODE: /* Get the size of additional parameters */ noway_assert((unsigned)opcode < CEE_COUNT); unsigned sz = opcodeSizes[opcode]; switch (opcode) { signed jmpDist; case CEE_PREFIX1: if (jumpTarget->bitVectTest((UINT)(codeAddr - codeBegp))) { BADCODE3("jump target between prefix 0xFE and opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } opcode = (OPCODE)(256 + getU1LittleEndian(codeAddr)); codeAddr += sizeof(__int8); goto DECODE_OPCODE; /* Check to see if we have a jump/return opcode */ case CEE_BRFALSE: case CEE_BRFALSE_S: case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BEQ: case CEE_BEQ_S: case CEE_BGE: case CEE_BGE_S: case CEE_BGE_UN: case CEE_BGE_UN_S: case CEE_BGT: case CEE_BGT_S: case CEE_BGT_UN: case CEE_BGT_UN_S: case CEE_BLE: case CEE_BLE_S: case CEE_BLE_UN: case CEE_BLE_UN_S: case CEE_BLT: case CEE_BLT_S: case CEE_BLT_UN: case CEE_BLT_UN_S: case CEE_BNE_UN: case CEE_BNE_UN_S: jmpKind = BBJ_COND; goto JMP; case CEE_LEAVE: case CEE_LEAVE_S: // We need to check if we are jumping out of a finally-protected try. jmpKind = BBJ_LEAVE; goto JMP; case CEE_BR: case CEE_BR_S: jmpKind = BBJ_ALWAYS; goto JMP; JMP: /* Compute the target address of the jump */ jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0 && (opcode == CEE_BR || opcode == CEE_BR_S)) { continue; /* NOP */ } jmpAddr = (IL_OFFSET)(codeAddr - codeBegp) + sz + jmpDist; break; case CEE_SWITCH: { unsigned jmpBase; unsigned jmpCnt; // # of switch cases (excluding default) BasicBlock** jmpTab; BasicBlock** jmpPtr; /* Allocate the switch descriptor */ swtDsc = new (this, CMK_BasicBlock) BBswtDesc; /* Read the number of entries in the table */ jmpCnt = getU4LittleEndian(codeAddr); codeAddr += 4; /* Compute the base offset for the opcode */ jmpBase = (IL_OFFSET)((codeAddr - codeBegp) + jmpCnt * sizeof(DWORD)); /* Allocate the jump table */ jmpPtr = jmpTab = new (this, CMK_BasicBlock) BasicBlock*[jmpCnt + 1]; /* Fill in the jump table */ for (unsigned count = jmpCnt; count; count--) { jmpDist = getI4LittleEndian(codeAddr); codeAddr += 4; // store the offset in the pointer. We change these in fgLinkBasicBlocks(). *jmpPtr++ = (BasicBlock*)(size_t)(jmpBase + jmpDist); } /* Append the default label to the target table */ *jmpPtr++ = (BasicBlock*)(size_t)jmpBase; /* Make sure we found the right number of labels */ noway_assert(jmpPtr == jmpTab + jmpCnt + 1); /* Compute the size of the switch opcode operands */ sz = sizeof(DWORD) + jmpCnt * sizeof(DWORD); /* Fill in the remaining fields of the switch descriptor */ swtDsc->bbsCount = jmpCnt + 1; swtDsc->bbsDstTab = jmpTab; /* This is definitely a jump */ jmpKind = BBJ_SWITCH; fgHasSwitch = true; if (opts.compProcedureSplitting) { // TODO-CQ: We might need to create a switch table; we won't know for sure until much later. // However, switch tables don't work with hot/cold splitting, currently. 
The switch table data needs // a relocation such that if the base (the first block after the prolog) and target of the switch // branch are put in different sections, the difference stored in the table is updated. However, our // relocation implementation doesn't support three different pointers (relocation address, base, and // target). So, we need to change our switch table implementation to be more like // JIT64: put the table in the code section, in the same hot/cold section as the switch jump itself // (maybe immediately after the switch jump), and make the "base" address be also in that section, // probably the address after the switch jump. opts.compProcedureSplitting = false; JITDUMP("Turning off procedure splitting for this method, as it might need switch tables; " "implementation limitation.\n"); } } goto GOT_ENDP; case CEE_ENDFILTER: bbFlags |= BBF_DONT_REMOVE; jmpKind = BBJ_EHFILTERRET; break; case CEE_ENDFINALLY: jmpKind = BBJ_EHFINALLYRET; break; case CEE_TAILCALL: if (compIsForInlining()) { // TODO-CQ: We can inline some callees with explicit tail calls if we can guarantee that the calls // can be dispatched as tail calls from the caller. compInlineResult->NoteFatal(InlineObservation::CALLEE_EXPLICIT_TAIL_PREFIX); retBlocks++; return retBlocks; } FALLTHROUGH; case CEE_READONLY: case CEE_CONSTRAINED: case CEE_VOLATILE: case CEE_UNALIGNED: // fgFindJumpTargets should have ruled out this possibility // (i.e. a prefix opcodes as last intruction in a block) noway_assert(codeAddr < codeEndp); if (jumpTarget->bitVectTest((UINT)(codeAddr - codeBegp))) { BADCODE3("jump target between prefix and an opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } break; case CEE_CALL: case CEE_CALLVIRT: case CEE_CALLI: { if (compIsForInlining() || // Ignore tail call in the inlinee. Period. (!tailCall && !compTailCallStress()) // A new BB with BBJ_RETURN would have been created // after a tailcall statement. // We need to keep this invariant if we want to stress the tailcall. // That way, the potential (tail)call statement is always the last // statement in the block. // Otherwise, we will assert at the following line in fgMorphCall() // noway_assert(fgMorphStmt->GetNextStmt() == NULL); ) { // Neither .tailcall prefix, no tailcall stress. So move on. break; } // Make sure the code sequence is legal for the tail call. // If so, mark this BB as having a BBJ_RETURN. if (codeAddr >= codeEndp - sz) { BADCODE3("No code found after the call instruction", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } if (tailCall) { // impIsTailCallILPattern uses isRecursive flag to determine whether ret in a fallthrough block is // allowed. We don't know at this point whether the call is recursive so we conservatively pass // false. This will only affect explicit tail calls when IL verification is not needed for the // method. bool isRecursive = false; if (!impIsTailCallILPattern(tailCall, opcode, codeAddr + sz, codeEndp, isRecursive)) { BADCODE3("tail call not followed by ret", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } if (fgMayExplicitTailCall()) { compTailPrefixSeen = true; } } else { OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr + sz); if (nextOpcode != CEE_RET) { noway_assert(compTailCallStress()); // Next OPCODE is not a CEE_RET, bail the attempt to stress the tailcall. // (I.e. We will not make a new BB after the "call" statement.) break; } } } /* For tail call, we just call CORINFO_HELP_TAILCALL, and it jumps to the target. 
So we don't need an epilog - just like CORINFO_HELP_THROW. Make the block BBJ_RETURN, but we will change it to BBJ_THROW if the tailness of the call is satisfied. NOTE : The next instruction is guaranteed to be a CEE_RET and it will create another BasicBlock. But there may be an jump directly to that CEE_RET. If we want to avoid creating an unnecessary block, we need to check if the CEE_RETURN is the target of a jump. */ FALLTHROUGH; case CEE_JMP: /* These are equivalent to a return from the current method But instead of directly returning to the caller we jump and execute something else in between */ case CEE_RET: retBlocks++; jmpKind = BBJ_RETURN; break; case CEE_THROW: case CEE_RETHROW: jmpKind = BBJ_THROW; break; #ifdef DEBUG // make certain we did not forget any flow of control instructions // by checking the 'ctrl' field in opcode.def. First filter out all // non-ctrl instructions #define BREAK(name) \ case name: \ break; #define NEXT(name) \ case name: \ break; #define CALL(name) #define THROW(name) #undef RETURN // undef contract RETURN macro #define RETURN(name) #define META(name) #define BRANCH(name) #define COND_BRANCH(name) #define PHI(name) #define OPDEF(name, string, pop, push, oprType, opcType, l, s1, s2, ctrl) ctrl(name) #include "opcode.def" #undef OPDEF #undef PHI #undef BREAK #undef CALL #undef NEXT #undef THROW #undef RETURN #undef META #undef BRANCH #undef COND_BRANCH // These ctrl-flow opcodes don't need any special handling case CEE_NEWOBJ: // CTRL_CALL break; // what's left are forgotten instructions default: BADCODE("Unrecognized control Opcode"); break; #else // !DEBUG default: break; #endif // !DEBUG } /* Jump over the operand */ codeAddr += sz; GOT_ENDP: tailCall = (opcode == CEE_TAILCALL); /* Make sure a jump target isn't in the middle of our opcode */ if (sz) { IL_OFFSET offs = (IL_OFFSET)(codeAddr - codeBegp) - sz; // offset of the operand for (unsigned i = 0; i < sz; i++, offs++) { if (jumpTarget->bitVectTest(offs)) { BADCODE3("jump into the middle of an opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } } } /* Compute the offset of the next opcode */ nxtBBoffs = (IL_OFFSET)(codeAddr - codeBegp); bool foundScope = false; if (opts.compDbgCode && (info.compVarScopesCount > 0)) { while (compGetNextEnterScope(nxtBBoffs)) { foundScope = true; } while (compGetNextExitScope(nxtBBoffs)) { foundScope = true; } } /* Do we have a jump? 
*/ if (jmpKind == BBJ_NONE) { /* No jump; make sure we don't fall off the end of the function */ if (codeAddr == codeEndp) { BADCODE3("missing return opcode", " at offset %04X", (IL_OFFSET)(codeAddr - codeBegp)); } /* If a label follows this opcode, we'll have to make a new BB */ bool makeBlock = jumpTarget->bitVectTest(nxtBBoffs); if (!makeBlock && foundScope) { makeBlock = true; #ifdef DEBUG if (verbose) { printf("Splitting at BBoffs = %04u\n", nxtBBoffs); } #endif // DEBUG } if (!makeBlock) { continue; } } /* We need to create a new basic block */ curBBdesc = fgNewBasicBlock(jmpKind); curBBdesc->bbFlags |= bbFlags; curBBdesc->bbRefs = 0; curBBdesc->bbCodeOffs = curBBoffs; curBBdesc->bbCodeOffsEnd = nxtBBoffs; switch (jmpKind) { case BBJ_SWITCH: curBBdesc->bbJumpSwt = swtDsc; break; case BBJ_COND: case BBJ_ALWAYS: case BBJ_LEAVE: noway_assert(jmpAddr != DUMMY_INIT(BAD_IL_OFFSET)); curBBdesc->bbJumpOffs = jmpAddr; break; default: break; } DBEXEC(verbose, curBBdesc->dspBlockHeader(this, false, false, false)); /* Remember where the next BB will start */ curBBoffs = nxtBBoffs; } while (codeAddr < codeEndp); noway_assert(codeAddr == codeEndp); /* Finally link up the bbJumpDest of the blocks together */ fgLinkBasicBlocks(); return retBlocks; } /***************************************************************************** * * Main entry point to discover the basic blocks for the current function. */ void Compiler::fgFindBasicBlocks() { #ifdef DEBUG if (verbose) { printf("*************** In fgFindBasicBlocks() for %s\n", info.compFullName); } // Call this here so any dump printing it inspires doesn't appear in the bb table. // fgStressBBProf(); #endif // Allocate the 'jump target' bit vector FixedBitVect* jumpTarget = FixedBitVect::bitVectInit(info.compILCodeSize + 1, this); // Walk the instrs to find all jump targets fgFindJumpTargets(info.compCode, info.compILCodeSize, jumpTarget); if (compDonotInline()) { return; } unsigned XTnum; /* Are there any exception handlers? 
*/ if (info.compXcptnsCount > 0) { noway_assert(!compIsForInlining()); /* Check and mark all the exception handlers */ for (XTnum = 0; XTnum < info.compXcptnsCount; XTnum++) { CORINFO_EH_CLAUSE clause; info.compCompHnd->getEHinfo(info.compMethodHnd, XTnum, &clause); noway_assert(clause.HandlerLength != (unsigned)-1); if (clause.TryLength <= 0) { BADCODE("try block length <=0"); } /* Mark the 'try' block extent and the handler itself */ if (clause.TryOffset > info.compILCodeSize) { BADCODE("try offset is > codesize"); } jumpTarget->bitVectSet(clause.TryOffset); if (clause.TryOffset + clause.TryLength > info.compILCodeSize) { BADCODE("try end is > codesize"); } jumpTarget->bitVectSet(clause.TryOffset + clause.TryLength); if (clause.HandlerOffset > info.compILCodeSize) { BADCODE("handler offset > codesize"); } jumpTarget->bitVectSet(clause.HandlerOffset); if (clause.HandlerOffset + clause.HandlerLength > info.compILCodeSize) { BADCODE("handler end > codesize"); } jumpTarget->bitVectSet(clause.HandlerOffset + clause.HandlerLength); if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { if (clause.FilterOffset > info.compILCodeSize) { BADCODE("filter offset > codesize"); } jumpTarget->bitVectSet(clause.FilterOffset); } } } #ifdef DEBUG if (verbose) { bool anyJumpTargets = false; printf("Jump targets:\n"); for (unsigned i = 0; i < info.compILCodeSize + 1; i++) { if (jumpTarget->bitVectTest(i)) { anyJumpTargets = true; printf(" IL_%04x\n", i); } } if (!anyJumpTargets) { printf(" none\n"); } } #endif // DEBUG /* Now create the basic blocks */ unsigned retBlocks = fgMakeBasicBlocks(info.compCode, info.compILCodeSize, jumpTarget); if (compIsForInlining()) { #ifdef DEBUG // If fgFindJumpTargets marked the call as "no return" there // really should be no BBJ_RETURN blocks in the method. bool markedNoReturn = (impInlineInfo->iciCall->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0; assert((markedNoReturn && (retBlocks == 0)) || (!markedNoReturn && (retBlocks >= 1))); #endif // DEBUG if (compInlineResult->IsFailure()) { return; } noway_assert(info.compXcptnsCount == 0); compHndBBtab = impInlineInfo->InlinerCompiler->compHndBBtab; compHndBBtabAllocCount = impInlineInfo->InlinerCompiler->compHndBBtabAllocCount; // we probably only use the table, not add to it. compHndBBtabCount = impInlineInfo->InlinerCompiler->compHndBBtabCount; info.compXcptnsCount = impInlineInfo->InlinerCompiler->info.compXcptnsCount; // Use a spill temp for the return value if there are multiple return blocks, // or if the inlinee has GC ref locals. if ((info.compRetNativeType != TYP_VOID) && ((retBlocks > 1) || impInlineInfo->HasGcRefLocals())) { // If we've spilled the ret expr to a temp we can reuse the temp // as the inlinee return spill temp. // // Todo: see if it is even better to always use this existing temp // for return values, even if we otherwise wouldn't need a return spill temp... lvaInlineeReturnSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp; if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) { // This temp should already have the type of the return value. JITDUMP("\nInliner: re-using pre-existing spill temp V%02u\n", lvaInlineeReturnSpillTemp); if (info.compRetType == TYP_REF) { // We may have co-opted an existing temp for the return spill. // We likely assumed it was single-def at the time, but now // we can see it has multiple definitions. if ((retBlocks > 1) && (lvaTable[lvaInlineeReturnSpillTemp].lvSingleDef == 1)) { // Make sure it is no longer marked single def. 
This is only safe // to do if we haven't ever updated the type. assert(!lvaTable[lvaInlineeReturnSpillTemp].lvClassInfoUpdated); JITDUMP("Marked return spill temp V%02u as NOT single def temp\n", lvaInlineeReturnSpillTemp); lvaTable[lvaInlineeReturnSpillTemp].lvSingleDef = 0; } } } else { // The lifetime of this var might expand multiple BBs. So it is a long lifetime compiler temp. lvaInlineeReturnSpillTemp = lvaGrabTemp(false DEBUGARG("Inline return value spill temp")); lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetType; // If the method returns a ref class, set the class of the spill temp // to the method's return value. We may update this later if it turns // out we can prove the method returns a more specific type. if (info.compRetType == TYP_REF) { // The return spill temp is single def only if the method has a single return block. if (retBlocks == 1) { lvaTable[lvaInlineeReturnSpillTemp].lvSingleDef = 1; JITDUMP("Marked return spill temp V%02u as a single def temp\n", lvaInlineeReturnSpillTemp); } CORINFO_CLASS_HANDLE retClassHnd = impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass; if (retClassHnd != nullptr) { lvaSetClass(lvaInlineeReturnSpillTemp, retClassHnd); } } } } return; } /* Mark all blocks within 'try' blocks as such */ if (info.compXcptnsCount == 0) { return; } if (info.compXcptnsCount > MAX_XCPTN_INDEX) { IMPL_LIMITATION("too many exception clauses"); } /* Allocate the exception handler table */ fgAllocEHTable(); /* Assume we don't need to sort the EH table (such that nested try/catch * appear before their try or handler parent). The EH verifier will notice * when we do need to sort it. */ fgNeedToSortEHTable = false; verInitEHTree(info.compXcptnsCount); EHNodeDsc* initRoot = ehnNext; // remember the original root since // it may get modified during insertion // Annotate BBs with exception handling information required for generating correct eh code // as well as checking for correct IL EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { CORINFO_EH_CLAUSE clause; info.compCompHnd->getEHinfo(info.compMethodHnd, XTnum, &clause); noway_assert(clause.HandlerLength != (unsigned)-1); // @DEPRECATED #ifdef DEBUG if (verbose) { dispIncomingEHClause(XTnum, clause); } #endif // DEBUG IL_OFFSET tryBegOff = clause.TryOffset; IL_OFFSET tryEndOff = tryBegOff + clause.TryLength; IL_OFFSET filterBegOff = 0; IL_OFFSET hndBegOff = clause.HandlerOffset; IL_OFFSET hndEndOff = hndBegOff + clause.HandlerLength; if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { filterBegOff = clause.FilterOffset; } if (tryEndOff > info.compILCodeSize) { BADCODE3("end of try block beyond end of method for try", " at offset %04X", tryBegOff); } if (hndEndOff > info.compILCodeSize) { BADCODE3("end of hnd block beyond end of method for try", " at offset %04X", tryBegOff); } HBtab->ebdTryBegOffset = tryBegOff; HBtab->ebdTryEndOffset = tryEndOff; HBtab->ebdFilterBegOffset = filterBegOff; HBtab->ebdHndBegOffset = hndBegOff; HBtab->ebdHndEndOffset = hndEndOff; /* Convert the various addresses to basic blocks */ BasicBlock* tryBegBB = fgLookupBB(tryBegOff); BasicBlock* tryEndBB = fgLookupBB(tryEndOff); // note: this can be NULL if the try region is at the end of the function BasicBlock* hndBegBB = fgLookupBB(hndBegOff); BasicBlock* hndEndBB = nullptr; BasicBlock* filtBB = nullptr; BasicBlock* block; // // Assert that the try/hnd beginning blocks are set up correctly // if (tryBegBB == nullptr) { BADCODE("Try Clause is invalid"); } if (hndBegBB == 
nullptr) { BADCODE("Handler Clause is invalid"); } #if HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION // This will change the block weight from 0 to 1 // and clear the rarely run flag hndBegBB->makeBlockHot(); #else hndBegBB->bbSetRunRarely(); // handler entry points are rarely executed #endif if (hndEndOff < info.compILCodeSize) { hndEndBB = fgLookupBB(hndEndOff); } if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { filtBB = HBtab->ebdFilter = fgLookupBB(clause.FilterOffset); filtBB->bbCatchTyp = BBCT_FILTER; hndBegBB->bbCatchTyp = BBCT_FILTER_HANDLER; #if HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION // This will change the block weight from 0 to 1 // and clear the rarely run flag filtBB->makeBlockHot(); #else filtBB->bbSetRunRarely(); // filter entry points are rarely executed #endif // Mark all BBs that belong to the filter with the XTnum of the corresponding handler for (block = filtBB; /**/; block = block->bbNext) { if (block == nullptr) { BADCODE3("Missing endfilter for filter", " at offset %04X", filtBB->bbCodeOffs); return; } // Still inside the filter block->setHndIndex(XTnum); if (block->bbJumpKind == BBJ_EHFILTERRET) { // Mark catch handler as successor. block->bbJumpDest = hndBegBB; assert(block->bbJumpDest->bbCatchTyp == BBCT_FILTER_HANDLER); break; } } if (!block->bbNext || block->bbNext != hndBegBB) { BADCODE3("Filter does not immediately precede handler for filter", " at offset %04X", filtBB->bbCodeOffs); } } else { HBtab->ebdTyp = clause.ClassToken; /* Set bbCatchTyp as appropriate */ if (clause.Flags & CORINFO_EH_CLAUSE_FINALLY) { hndBegBB->bbCatchTyp = BBCT_FINALLY; } else { if (clause.Flags & CORINFO_EH_CLAUSE_FAULT) { hndBegBB->bbCatchTyp = BBCT_FAULT; } else { hndBegBB->bbCatchTyp = clause.ClassToken; // These values should be non-zero value that will // not collide with real tokens for bbCatchTyp if (clause.ClassToken == 0) { BADCODE("Exception catch type is Null"); } noway_assert(clause.ClassToken != BBCT_FAULT); noway_assert(clause.ClassToken != BBCT_FINALLY); noway_assert(clause.ClassToken != BBCT_FILTER); noway_assert(clause.ClassToken != BBCT_FILTER_HANDLER); } } } /* Mark the initial block and last blocks in the 'try' region */ tryBegBB->bbFlags |= BBF_TRY_BEG; /* Prevent future optimizations of removing the first block */ /* of a TRY block and the first block of an exception handler */ tryBegBB->bbFlags |= BBF_DONT_REMOVE; hndBegBB->bbFlags |= BBF_DONT_REMOVE; hndBegBB->bbRefs++; // The first block of a handler gets an extra, "artificial" reference count. if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { filtBB->bbFlags |= BBF_DONT_REMOVE; filtBB->bbRefs++; // The first block of a filter gets an extra, "artificial" reference count. } tryBegBB->bbFlags |= BBF_DONT_REMOVE; hndBegBB->bbFlags |= BBF_DONT_REMOVE; // // Store the info to the table of EH block handlers // HBtab->ebdHandlerType = ToEHHandlerType(clause.Flags); HBtab->ebdTryBeg = tryBegBB; HBtab->ebdTryLast = (tryEndBB == nullptr) ? fgLastBB : tryEndBB->bbPrev; HBtab->ebdHndBeg = hndBegBB; HBtab->ebdHndLast = (hndEndBB == nullptr) ? fgLastBB : hndEndBB->bbPrev; // // Assert that all of our try/hnd blocks are setup correctly. // if (HBtab->ebdTryLast == nullptr) { BADCODE("Try Clause is invalid"); } if (HBtab->ebdHndLast == nullptr) { BADCODE("Handler Clause is invalid"); } // // Verify that it's legal // verInsertEhNode(&clause, HBtab); } // end foreach handler table entry fgSortEHTable(); // Next, set things related to nesting that depend on the sorting being complete. 
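    // For each clause (inner clauses come first after the sort) this pass stamps the
    // handler/try block ranges with the clause index, marks non-entry blocks of
    // non-finally handlers as rarely run, and records ebdEnclosingTryIndex /
    // ebdEnclosingHndIndex for enclosed clauses based on IL offset containment.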
for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { /* Mark all blocks in the finally/fault or catch clause */ BasicBlock* tryBegBB = HBtab->ebdTryBeg; BasicBlock* hndBegBB = HBtab->ebdHndBeg; IL_OFFSET tryBegOff = HBtab->ebdTryBegOffset; IL_OFFSET tryEndOff = HBtab->ebdTryEndOffset; IL_OFFSET hndBegOff = HBtab->ebdHndBegOffset; IL_OFFSET hndEndOff = HBtab->ebdHndEndOffset; BasicBlock* block; for (block = hndBegBB; block && (block->bbCodeOffs < hndEndOff); block = block->bbNext) { if (!block->hasHndIndex()) { block->setHndIndex(XTnum); } // All blocks in a catch handler or filter are rarely run, except the entry if ((block != hndBegBB) && (hndBegBB->bbCatchTyp != BBCT_FINALLY)) { block->bbSetRunRarely(); } } /* Mark all blocks within the covered range of the try */ for (block = tryBegBB; block && (block->bbCodeOffs < tryEndOff); block = block->bbNext) { /* Mark this BB as belonging to a 'try' block */ if (!block->hasTryIndex()) { block->setTryIndex(XTnum); } #ifdef DEBUG /* Note: the BB can't span the 'try' block */ if (!(block->bbFlags & BBF_INTERNAL)) { noway_assert(tryBegOff <= block->bbCodeOffs); noway_assert(tryEndOff >= block->bbCodeOffsEnd || tryEndOff == tryBegOff); } #endif } /* Init ebdHandlerNestingLevel of current clause, and bump up value for all * enclosed clauses (which have to be before it in the table). * Innermost try-finally blocks must precede outermost * try-finally blocks. */ #if !defined(FEATURE_EH_FUNCLETS) HBtab->ebdHandlerNestingLevel = 0; #endif // !FEATURE_EH_FUNCLETS HBtab->ebdEnclosingTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; HBtab->ebdEnclosingHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; noway_assert(XTnum < compHndBBtabCount); noway_assert(XTnum == ehGetIndex(HBtab)); for (EHblkDsc* xtab = compHndBBtab; xtab < HBtab; xtab++) { #if !defined(FEATURE_EH_FUNCLETS) if (jitIsBetween(xtab->ebdHndBegOffs(), hndBegOff, hndEndOff)) { xtab->ebdHandlerNestingLevel++; } #endif // !FEATURE_EH_FUNCLETS /* If we haven't recorded an enclosing try index for xtab then see * if this EH region should be recorded. We check if the * first offset in the xtab lies within our region. If so, * the last offset also must lie within the region, due to * nesting rules. verInsertEhNode(), below, will check for proper nesting. */ if (xtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { bool begBetween = jitIsBetween(xtab->ebdTryBegOffs(), tryBegOff, tryEndOff); if (begBetween) { // Record the enclosing scope link xtab->ebdEnclosingTryIndex = (unsigned short)XTnum; } } /* Do the same for the enclosing handler index. */ if (xtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) { bool begBetween = jitIsBetween(xtab->ebdTryBegOffs(), hndBegOff, hndEndOff); if (begBetween) { // Record the enclosing scope link xtab->ebdEnclosingHndIndex = (unsigned short)XTnum; } } } } // end foreach handler table entry #if !defined(FEATURE_EH_FUNCLETS) for (EHblkDsc* const HBtab : EHClauses(this)) { if (ehMaxHndNestingCount <= HBtab->ebdHandlerNestingLevel) ehMaxHndNestingCount = HBtab->ebdHandlerNestingLevel + 1; } #endif // !FEATURE_EH_FUNCLETS { // always run these checks for a debug build verCheckNestingLevel(initRoot); } #ifndef DEBUG // fgNormalizeEH assumes that this test has been passed. And Ssa assumes that fgNormalizeEHTable // has been run. So do this unless we're in minOpts mode (and always in debug). 
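// (In release builds the check below is skipped for MinOpts; in debug builds the
//  '#ifndef DEBUG' above removes the condition entirely, so the check always runs.)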
    if (!opts.MinOpts())
#endif
    {
        fgCheckBasicBlockControlFlow();
    }

#ifdef DEBUG
    if (verbose)
    {
        JITDUMP("*************** After fgFindBasicBlocks() has created the EH table\n");
        fgDispHandlerTab();
    }

    // We can't verify the handler table until all the IL legality checks have been done (above), since bad IL
    // (such as illegal nesting of regions) will trigger asserts here.
    fgVerifyHandlerTab();
#endif

    fgNormalizeEH();

    fgCheckForLoopsInHandlers();
}

//------------------------------------------------------------------------
// fgCheckForLoopsInHandlers: scan blocks to see if any handler block
//   is a backedge target.
//
// Notes:
//    Sets compHasBackwardJumpInHandler if so. This will disable
//    setting patchpoints in this method and prompt the jit to
//    optimize the method instead.
//
//    We assume any late-added handler (say for synchronized methods) will
//    not introduce any loops.
//
void Compiler::fgCheckForLoopsInHandlers()
{
    // We only care about this if we are going to set OSR patchpoints
    // and the method has exception handling.
    //
    if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0))
    {
        return;
    }

    if (JitConfig.TC_OnStackReplacement() == 0)
    {
        return;
    }

    if (info.compXcptnsCount == 0)
    {
        return;
    }

    // Walk blocks in handlers and filters, looking for a backedge target.
    //
    assert(!compHasBackwardJumpInHandler);
    for (BasicBlock* const blk : Blocks())
    {
        if (blk->hasHndIndex())
        {
            if (blk->bbFlags & BBF_BACKWARD_JUMP_TARGET)
            {
                JITDUMP("\nHandler block " FMT_BB " is a backward jump target; can't have patchpoints in this method\n",
                        blk->bbNum);
                compHasBackwardJumpInHandler = true;
                break;
            }
        }
    }
}

//------------------------------------------------------------------------
// fgFixEntryFlowForOSR: add control flow path from method start to
//   the appropriate IL offset for the OSR method
//
// Notes:
//    This is simply a branch from the method entry to the OSR entry --
//    the block where the OSR method should begin execution.
//
//    If the OSR entry is within a try we will eventually need to add
//    suitable step blocks to reach the OSR entry without jumping into
//    the middle of the try. But we defer that until after importation.
//    See fgPostImportationCleanup.
//
void Compiler::fgFixEntryFlowForOSR()
{
    // Ensure the IL->BB lookup table is valid
    //
    fgInitBBLookup();

    // Remember the original entry block in case this method is tail recursive.
    //
    fgEntryBB = fgLookupBB(0);

    // Find the OSR entry block.
    //
    assert(info.compILEntry >= 0);
    BasicBlock* const osrEntry = fgLookupBB(info.compILEntry);

    // Remember the OSR entry block so we can find it again later.
    //
    fgOSREntryBB = osrEntry;

    // Now branch from method start to the right spot.
    //
    fgEnsureFirstBBisScratch();
    fgFirstBB->bbJumpKind = BBJ_ALWAYS;
    fgFirstBB->bbJumpDest = osrEntry;
    fgAddRefPred(osrEntry, fgFirstBB);

    JITDUMP("OSR: redirecting flow at entry from entry " FMT_BB " to OSR entry " FMT_BB " for the importer\n",
            fgFirstBB->bbNum, osrEntry->bbNum);
}

/*****************************************************************************
 * Check control flow constraints for well-formed IL. Bail if any of the constraints
 * are violated.
*/ void Compiler::fgCheckBasicBlockControlFlow() { assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks EHblkDsc* HBtab; for (BasicBlock* const blk : Blocks()) { if (blk->bbFlags & BBF_INTERNAL) { continue; } switch (blk->bbJumpKind) { case BBJ_NONE: // block flows into the next one (no jump) fgControlFlowPermitted(blk, blk->bbNext); break; case BBJ_ALWAYS: // block does unconditional jump to target fgControlFlowPermitted(blk, blk->bbJumpDest); break; case BBJ_COND: // block conditionally jumps to the target fgControlFlowPermitted(blk, blk->bbNext); fgControlFlowPermitted(blk, blk->bbJumpDest); break; case BBJ_RETURN: // block ends with 'ret' if (blk->hasTryIndex() || blk->hasHndIndex()) { BADCODE3("Return from a protected block", ". Before offset %04X", blk->bbCodeOffsEnd); } break; case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: if (!blk->hasHndIndex()) // must be part of a handler { BADCODE3("Missing handler", ". Before offset %04X", blk->bbCodeOffsEnd); } HBtab = ehGetDsc(blk->getHndIndex()); // Endfilter allowed only in a filter block if (blk->bbJumpKind == BBJ_EHFILTERRET) { if (!HBtab->HasFilter()) { BADCODE("Unexpected endfilter"); } } // endfinally allowed only in a finally/fault block else if (!HBtab->HasFinallyOrFaultHandler()) { BADCODE("Unexpected endfinally"); } // The handler block should be the innermost block // Exception blocks are listed, innermost first. if (blk->hasTryIndex() && (blk->getTryIndex() < blk->getHndIndex())) { BADCODE("endfinally / endfilter in nested try block"); } break; case BBJ_THROW: // block ends with 'throw' /* throw is permitted from every BB, so nothing to check */ /* importer makes sure that rethrow is done from a catch */ break; case BBJ_LEAVE: // block always jumps to the target, maybe out of guarded // region. Used temporarily until importing fgControlFlowPermitted(blk, blk->bbJumpDest, true); break; case BBJ_SWITCH: // block ends with a switch statement for (BasicBlock* const bTarget : blk->SwitchTargets()) { fgControlFlowPermitted(blk, bTarget); } break; case BBJ_EHCATCHRET: // block ends with a leave out of a catch (only #if defined(FEATURE_EH_FUNCLETS)) case BBJ_CALLFINALLY: // block always calls the target finally default: noway_assert(!"Unexpected bbJumpKind"); // these blocks don't get created until importing break; } } } /**************************************************************************** * Check that the leave from the block is legal. * Consider removing this check here if we can do it cheaply during importing */ void Compiler::fgControlFlowPermitted(BasicBlock* blkSrc, BasicBlock* blkDest, bool isLeave) { assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks unsigned srcHndBeg, destHndBeg; unsigned srcHndEnd, destHndEnd; bool srcInFilter, destInFilter; bool srcInCatch = false; EHblkDsc* srcHndTab; srcHndTab = ehInitHndRange(blkSrc, &srcHndBeg, &srcHndEnd, &srcInFilter); ehInitHndRange(blkDest, &destHndBeg, &destHndEnd, &destInFilter); /* Impose the rules for leaving or jumping from handler blocks */ if (blkSrc->hasHndIndex()) { srcInCatch = srcHndTab->HasCatchHandler() && srcHndTab->InHndRegionILRange(blkSrc); /* Are we jumping within the same handler index? */ if (BasicBlock::sameHndRegion(blkSrc, blkDest)) { /* Do we have a filter clause? */ if (srcHndTab->HasFilter()) { /* filters and catch handlers share same eh index */ /* we need to check for control flow between them. 
*/ if (srcInFilter != destInFilter) { if (!jitIsBetween(blkDest->bbCodeOffs, srcHndBeg, srcHndEnd)) { BADCODE3("Illegal control flow between filter and handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } } } else { /* The handler indexes of blkSrc and blkDest are different */ if (isLeave) { /* Any leave instructions must not enter the dest handler from outside*/ if (!jitIsBetween(srcHndBeg, destHndBeg, destHndEnd)) { BADCODE3("Illegal use of leave to enter handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } else { /* We must use a leave to exit a handler */ BADCODE3("Illegal control flow out of a handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } /* Do we have a filter clause? */ if (srcHndTab->HasFilter()) { /* It is ok to leave from the handler block of a filter, */ /* but not from the filter block of a filter */ if (srcInFilter != destInFilter) { BADCODE3("Illegal to leave a filter handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } /* We should never leave a finally handler */ if (srcHndTab->HasFinallyHandler()) { BADCODE3("Illegal to leave a finally handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } /* We should never leave a fault handler */ if (srcHndTab->HasFaultHandler()) { BADCODE3("Illegal to leave a fault handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } } else if (blkDest->hasHndIndex()) { /* blkSrc was not inside a handler, but blkDst is inside a handler */ BADCODE3("Illegal control flow into a handler", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } /* Are we jumping from a catch handler into the corresponding try? */ /* VB uses this for "on error goto " */ if (isLeave && srcInCatch) { // inspect all handlers containing the jump source bool bValidJumpToTry = false; // are we jumping in a valid way from a catch to the corresponding try? bool bCatchHandlerOnly = true; // false if we are jumping out of a non-catch handler EHblkDsc* ehTableEnd; EHblkDsc* ehDsc; for (ehDsc = compHndBBtab, ehTableEnd = compHndBBtab + compHndBBtabCount; bCatchHandlerOnly && ehDsc < ehTableEnd; ehDsc++) { if (ehDsc->InHndRegionILRange(blkSrc)) { if (ehDsc->HasCatchHandler()) { if (ehDsc->InTryRegionILRange(blkDest)) { // If we already considered the jump for a different try/catch, // we would have two overlapping try regions with two overlapping catch // regions, which is illegal. noway_assert(!bValidJumpToTry); // Allowed if it is the first instruction of an inner try // (and all trys in between) // // try { // .. // _tryAgain: // .. // try { // _tryNestedInner: // .. // try { // _tryNestedIllegal: // .. // } catch { // .. // } // .. // } catch { // .. // } // .. // } catch { // .. // leave _tryAgain // Allowed // .. // leave _tryNestedInner // Allowed // .. // leave _tryNestedIllegal // Not Allowed // .. // } // // Note: The leave is allowed also from catches nested inside the catch shown above. /* The common case where leave is to the corresponding try */ if (ehDsc->ebdIsSameTry(this, blkDest->getTryIndex()) || /* Also allowed is a leave to the start of a try which starts in the handler's try */ fgFlowToFirstBlockOfInnerTry(ehDsc->ebdTryBeg, blkDest, false)) { bValidJumpToTry = true; } } } else { // We are jumping from a handler which is not a catch handler. // If it's a handler, but not a catch handler, it must be either a finally or fault if (!ehDsc->HasFinallyOrFaultHandler()) { BADCODE3("Handlers must be catch, finally, or fault", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } // Are we jumping out of this handler? 
if (!ehDsc->InHndRegionILRange(blkDest)) { bCatchHandlerOnly = false; } } } else if (ehDsc->InFilterRegionILRange(blkSrc)) { // Are we jumping out of a filter? if (!ehDsc->InFilterRegionILRange(blkDest)) { bCatchHandlerOnly = false; } } } if (bCatchHandlerOnly) { if (bValidJumpToTry) { return; } else { // FALL THROUGH // This is either the case of a leave to outside the try/catch, // or a leave to a try not nested in this try/catch. // The first case is allowed, the second one will be checked // later when we check the try block rules (it is illegal if we // jump to the middle of the destination try). } } else { BADCODE3("illegal leave to exit a finally, fault or filter", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } /* Check all the try block rules */ IL_OFFSET srcTryBeg; IL_OFFSET srcTryEnd; IL_OFFSET destTryBeg; IL_OFFSET destTryEnd; ehInitTryRange(blkSrc, &srcTryBeg, &srcTryEnd); ehInitTryRange(blkDest, &destTryBeg, &destTryEnd); /* Are we jumping between try indexes? */ if (!BasicBlock::sameTryRegion(blkSrc, blkDest)) { // Are we exiting from an inner to outer try? if (jitIsBetween(srcTryBeg, destTryBeg, destTryEnd) && jitIsBetween(srcTryEnd - 1, destTryBeg, destTryEnd)) { if (!isLeave) { BADCODE3("exit from try block without a leave", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } else if (jitIsBetween(destTryBeg, srcTryBeg, srcTryEnd)) { // check that the dest Try is first instruction of an inner try if (!fgFlowToFirstBlockOfInnerTry(blkSrc, blkDest, false)) { BADCODE3("control flow into middle of try", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } else // there is no nesting relationship between src and dest { if (isLeave) { // check that the dest Try is first instruction of an inner try sibling if (!fgFlowToFirstBlockOfInnerTry(blkSrc, blkDest, true)) { BADCODE3("illegal leave into middle of try", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } else { BADCODE3("illegal control flow in to/out of try block", ". Before offset %04X", blkSrc->bbCodeOffsEnd); } } } } /***************************************************************************** * Check that blkDest is the first block of an inner try or a sibling * with no intervening trys in between */ bool Compiler::fgFlowToFirstBlockOfInnerTry(BasicBlock* blkSrc, BasicBlock* blkDest, bool sibling) { assert(!fgNormalizeEHDone); // These rules aren't quite correct after EH normalization has introduced new blocks noway_assert(blkDest->hasTryIndex()); unsigned XTnum = blkDest->getTryIndex(); unsigned lastXTnum = blkSrc->hasTryIndex() ? blkSrc->getTryIndex() : compHndBBtabCount; noway_assert(XTnum < compHndBBtabCount); noway_assert(lastXTnum <= compHndBBtabCount); EHblkDsc* HBtab = ehGetDsc(XTnum); // check that we are not jumping into middle of try if (HBtab->ebdTryBeg != blkDest) { return false; } if (sibling) { noway_assert(!BasicBlock::sameTryRegion(blkSrc, blkDest)); // find the l.u.b of the two try ranges // Set lastXTnum to the l.u.b. 
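        // ("l.u.b." = least upper bound: the loop below advances lastXTnum to the
        // first try region past blkSrc's region whose block range contains blkDest,
        // i.e. roughly the nearest try region enclosing both blocks.)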
HBtab = ehGetDsc(lastXTnum); for (lastXTnum++, HBtab++; lastXTnum < compHndBBtabCount; lastXTnum++, HBtab++) { if (jitIsBetweenInclusive(blkDest->bbNum, HBtab->ebdTryBeg->bbNum, HBtab->ebdTryLast->bbNum)) { break; } } } // now check there are no intervening trys between dest and l.u.b // (it is ok to have intervening trys as long as they all start at // the same code offset) HBtab = ehGetDsc(XTnum); for (XTnum++, HBtab++; XTnum < lastXTnum; XTnum++, HBtab++) { if (HBtab->ebdTryBeg->bbNum < blkDest->bbNum && blkDest->bbNum <= HBtab->ebdTryLast->bbNum) { return false; } } return true; } /***************************************************************************** * Returns the handler nesting level of the block. * *pFinallyNesting is set to the nesting level of the inner-most * finally-protected try the block is in. */ unsigned Compiler::fgGetNestingLevel(BasicBlock* block, unsigned* pFinallyNesting) { unsigned curNesting = 0; // How many handlers is the block in unsigned tryFin = (unsigned)-1; // curNesting when we see innermost finally-protected try unsigned XTnum; EHblkDsc* HBtab; /* We find the block's handler nesting level by walking over the complete exception table and find enclosing clauses. */ for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { noway_assert(HBtab->ebdTryBeg && HBtab->ebdHndBeg); if (HBtab->HasFinallyHandler() && (tryFin == (unsigned)-1) && bbInTryRegions(XTnum, block)) { tryFin = curNesting; } else if (bbInHandlerRegions(XTnum, block)) { curNesting++; } } if (tryFin == (unsigned)-1) { tryFin = curNesting; } if (pFinallyNesting) { *pFinallyNesting = curNesting - tryFin; } return curNesting; } //------------------------------------------------------------------------ // fgFindBlockILOffset: Given a block, find the IL offset corresponding to the first statement // in the block with a legal IL offset. Skip any leading statements that have BAD_IL_OFFSET. // If no statement has an initialized statement offset (including the case where there are // no statements in the block), then return BAD_IL_OFFSET. This function is used when // blocks are split or modified, and we want to maintain the IL offset as much as possible // to preserve good debugging behavior. // // Arguments: // block - The block to check. // // Return Value: // The first good IL offset of a statement in the block, or BAD_IL_OFFSET if such an IL offset // cannot be found. // IL_OFFSET Compiler::fgFindBlockILOffset(BasicBlock* block) { // This function searches for IL offsets in statement nodes, so it can't be used in LIR. We // could have a similar function for LIR that searches for GT_IL_OFFSET nodes. assert(!block->IsLIR()); for (Statement* const stmt : block->Statements()) { // Blocks always contain IL offsets in the root. DebugInfo di = stmt->GetDebugInfo().GetRoot(); if (di.IsValid()) { return di.GetLocation().GetOffset(); } } return BAD_IL_OFFSET; } //------------------------------------------------------------------------------ // fgSplitBlockAtEnd - split the given block into two blocks. // All code in the block stays in the original block. // Control falls through from original to new block, and // the new block is returned. //------------------------------------------------------------------------------ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) { // We'd like to use fgNewBBafter(), but we need to update the preds list before linking in the new block. // (We need the successors of 'curr' to be correct when we do this.) 
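    // Roughly, for a block 'curr' with successors S1..Sn:
    //
    //    before:  curr [all code] --> S1..Sn
    //    after:   curr [all code] --> newBlock [empty] --> S1..Sn
    //
    // 'curr' keeps the code and falls through to 'newBlock', which takes over
    // curr's jump kind and jump target(s).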
BasicBlock* newBlock = bbNewBasicBlock(curr->bbJumpKind); // Start the new block with no refs. When we set the preds below, this will get updated correctly. newBlock->bbRefs = 0; // For each successor of the original block, set the new block as their predecessor. // Note we are using the "rational" version of the successor iterator that does not hide the finallyret arcs. // Without these arcs, a block 'b' may not be a member of succs(preds(b)) if (curr->bbJumpKind != BBJ_SWITCH) { for (BasicBlock* const succ : curr->Succs(this)) { if (succ != newBlock) { JITDUMP(FMT_BB " previous predecessor was " FMT_BB ", now is " FMT_BB "\n", succ->bbNum, curr->bbNum, newBlock->bbNum); fgReplacePred(succ, curr, newBlock); } } newBlock->bbJumpDest = curr->bbJumpDest; curr->bbJumpDest = nullptr; } else { // In the case of a switch statement there's more complicated logic in order to wire up the predecessor lists // but fortunately there's an existing method that implements this functionality. newBlock->bbJumpSwt = curr->bbJumpSwt; fgChangeSwitchBlock(curr, newBlock); curr->bbJumpSwt = nullptr; } newBlock->inheritWeight(curr); // Set the new block's flags. Note that the new block isn't BBF_INTERNAL unless the old block is. newBlock->bbFlags = curr->bbFlags; // Remove flags that the new block can't have. newBlock->bbFlags &= ~(BBF_TRY_BEG | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_FUNCLET_BEG | BBF_LOOP_PREHEADER | BBF_KEEP_BBJ_ALWAYS | BBF_PATCHPOINT | BBF_BACKWARD_JUMP_TARGET | BBF_LOOP_ALIGN); // Remove the GC safe bit on the new block. It seems clear that if we split 'curr' at the end, // such that all the code is left in 'curr', and 'newBlock' just gets the control flow, then // both 'curr' and 'newBlock' could accurately retain an existing GC safe bit. However, callers // use this function to split blocks in the middle, or at the beginning, and they don't seem to // be careful about updating this flag appropriately. So, removing the GC safe bit is simply // conservative: some functions might end up being fully interruptible that could be partially // interruptible if we exercised more care here. newBlock->bbFlags &= ~BBF_GC_SAFE_POINT; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) newBlock->bbFlags &= ~(BBF_FINALLY_TARGET); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // The new block has no code, so we leave bbCodeOffs/bbCodeOffsEnd set to BAD_IL_OFFSET. If a caller // puts code in the block, then it needs to update these. // Insert the new block in the block list after the 'curr' block. fgInsertBBafter(curr, newBlock); fgExtendEHRegionAfter(curr); // The new block is in the same EH region as the old block. // Remove flags from the old block that are no longer possible. curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL); // Default to fallthru, and add the arc for that. curr->bbJumpKind = BBJ_NONE; fgAddRefPred(newBlock, curr); return newBlock; } //------------------------------------------------------------------------------ // fgSplitBlockAfterStatement - Split the given block, with all code after // the given statement going into the second block. //------------------------------------------------------------------------------ BasicBlock* Compiler::fgSplitBlockAfterStatement(BasicBlock* curr, Statement* stmt) { assert(!curr->IsLIR()); // No statements in LIR, so you can't use this function. 
BasicBlock* newBlock = fgSplitBlockAtEnd(curr); if (stmt != nullptr) { newBlock->bbStmtList = stmt->GetNextStmt(); if (newBlock->bbStmtList != nullptr) { newBlock->bbStmtList->SetPrevStmt(curr->bbStmtList->GetPrevStmt()); } curr->bbStmtList->SetPrevStmt(stmt); stmt->SetNextStmt(nullptr); // Update the IL offsets of the blocks to match the split. assert(newBlock->bbCodeOffs == BAD_IL_OFFSET); assert(newBlock->bbCodeOffsEnd == BAD_IL_OFFSET); // curr->bbCodeOffs remains the same newBlock->bbCodeOffsEnd = curr->bbCodeOffsEnd; IL_OFFSET splitPointILOffset = fgFindBlockILOffset(newBlock); curr->bbCodeOffsEnd = splitPointILOffset; newBlock->bbCodeOffs = splitPointILOffset; } else { assert(curr->bbStmtList == nullptr); // if no tree was given then it better be an empty block } return newBlock; } //------------------------------------------------------------------------------ // fgSplitBlockAfterNode - Split the given block, with all code after // the given node going into the second block. // This function is only used in LIR. //------------------------------------------------------------------------------ BasicBlock* Compiler::fgSplitBlockAfterNode(BasicBlock* curr, GenTree* node) { assert(curr->IsLIR()); BasicBlock* newBlock = fgSplitBlockAtEnd(curr); if (node != nullptr) { LIR::Range& currBBRange = LIR::AsRange(curr); if (node != currBBRange.LastNode()) { LIR::Range nodesToMove = currBBRange.Remove(node->gtNext, currBBRange.LastNode()); LIR::AsRange(newBlock).InsertAtBeginning(std::move(nodesToMove)); } // Update the IL offsets of the blocks to match the split. assert(newBlock->bbCodeOffs == BAD_IL_OFFSET); assert(newBlock->bbCodeOffsEnd == BAD_IL_OFFSET); // curr->bbCodeOffs remains the same newBlock->bbCodeOffsEnd = curr->bbCodeOffsEnd; // Search backwards from the end of the current block looking for the IL offset to use // for the end IL offset for the original block. IL_OFFSET splitPointILOffset = BAD_IL_OFFSET; LIR::Range::ReverseIterator riter; LIR::Range::ReverseIterator riterEnd; for (riter = currBBRange.rbegin(), riterEnd = currBBRange.rend(); riter != riterEnd; ++riter) { if ((*riter)->gtOper == GT_IL_OFFSET) { GenTreeILOffset* ilOffset = (*riter)->AsILOffset(); DebugInfo rootDI = ilOffset->gtStmtDI.GetRoot(); if (rootDI.IsValid()) { splitPointILOffset = rootDI.GetLocation().GetOffset(); break; } } } curr->bbCodeOffsEnd = splitPointILOffset; // Also use this as the beginning offset of the next block. Presumably we could/should // look to see if the first node is a GT_IL_OFFSET node, and use that instead. newBlock->bbCodeOffs = splitPointILOffset; } else { assert(curr->bbStmtList == nullptr); // if no node was given then it better be an empty block } return newBlock; } //------------------------------------------------------------------------------ // fgSplitBlockAtBeginning - Split the given block into two blocks. // Control falls through from original to new block, // and the new block is returned. // All code in the original block goes into the new block //------------------------------------------------------------------------------ BasicBlock* Compiler::fgSplitBlockAtBeginning(BasicBlock* curr) { BasicBlock* newBlock = fgSplitBlockAtEnd(curr); if (curr->IsLIR()) { newBlock->SetFirstLIRNode(curr->GetFirstLIRNode()); curr->SetFirstLIRNode(nullptr); } else { newBlock->bbStmtList = curr->bbStmtList; curr->bbStmtList = nullptr; } // The new block now has all the code, and the old block has none. Update the // IL offsets for the block to reflect this. 
newBlock->bbCodeOffs = curr->bbCodeOffs; newBlock->bbCodeOffsEnd = curr->bbCodeOffsEnd; curr->bbCodeOffs = BAD_IL_OFFSET; curr->bbCodeOffsEnd = BAD_IL_OFFSET; return newBlock; } //------------------------------------------------------------------------ // fgSplitEdge: Splits the edge between a block 'curr' and its successor 'succ' by creating a new block // that replaces 'succ' as a successor of 'curr', and which branches unconditionally // to (or falls through to) 'succ'. Note that for a BBJ_COND block 'curr', // 'succ' might be the fall-through path or the branch path from 'curr'. // // Arguments: // curr - A block which branches to 'succ' // succ - The target block // // Return Value: // Returns a new block, that is a successor of 'curr' and which branches unconditionally to 'succ' // // Assumptions: // 'curr' must have a bbJumpKind of BBJ_COND, BBJ_ALWAYS, or BBJ_SWITCH // // Notes: // The returned block is empty. // Can be invoked before pred lists are built. BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) { assert(curr->KindIs(BBJ_COND, BBJ_SWITCH, BBJ_ALWAYS)); if (fgComputePredsDone) { assert(fgGetPredForBlock(succ, curr) != nullptr); } BasicBlock* newBlock; if (succ == curr->bbNext) { // The successor is the fall-through path of a BBJ_COND, or // an immediately following block of a BBJ_SWITCH (which has // no fall-through path). For this case, simply insert a new // fall-through block after 'curr'. newBlock = fgNewBBafter(BBJ_NONE, curr, true /*extendRegion*/); } else { newBlock = fgNewBBinRegion(BBJ_ALWAYS, curr, curr->isRunRarely()); // The new block always jumps to 'succ' newBlock->bbJumpDest = succ; } newBlock->bbFlags |= (curr->bbFlags & succ->bbFlags & (BBF_BACKWARD_JUMP)); JITDUMP("Splitting edge from " FMT_BB " to " FMT_BB "; adding " FMT_BB "\n", curr->bbNum, succ->bbNum, newBlock->bbNum); if (curr->bbJumpKind == BBJ_COND) { fgReplacePred(succ, curr, newBlock); if (curr->bbJumpDest == succ) { // Now 'curr' jumps to newBlock curr->bbJumpDest = newBlock; } fgAddRefPred(newBlock, curr); } else if (curr->bbJumpKind == BBJ_SWITCH) { // newBlock replaces 'succ' in the switch. fgReplaceSwitchJumpTarget(curr, newBlock, succ); // And 'succ' has 'newBlock' as a new predecessor. fgAddRefPred(succ, newBlock); } else { assert(curr->bbJumpKind == BBJ_ALWAYS); fgReplacePred(succ, curr, newBlock); curr->bbJumpDest = newBlock; fgAddRefPred(newBlock, curr); } // This isn't accurate, but it is complex to compute a reasonable number so just assume that we take the // branch 50% of the time. 
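    // For example, when splitting an edge out of a BBJ_COND or BBJ_SWITCH block of
    // weight 100, the new block gets weight 50; a BBJ_ALWAYS edge is always taken,
    // so no scaling is applied in that case.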
// if (curr->bbJumpKind != BBJ_ALWAYS) { newBlock->inheritWeightPercentage(curr, 50); } // The bbLiveIn and bbLiveOut are both equal to the bbLiveIn of 'succ' if (fgLocalVarLivenessDone) { VarSetOps::Assign(this, newBlock->bbLiveIn, succ->bbLiveIn); VarSetOps::Assign(this, newBlock->bbLiveOut, succ->bbLiveIn); } return newBlock; } // Removes the block from the bbPrev/bbNext chain // Updates fgFirstBB and fgLastBB if necessary // Does not update fgFirstFuncletBB or fgFirstColdBlock (fgUnlinkRange does) void Compiler::fgUnlinkBlock(BasicBlock* block) { if (block->bbPrev) { block->bbPrev->bbNext = block->bbNext; if (block->bbNext) { block->bbNext->bbPrev = block->bbPrev; } else { fgLastBB = block->bbPrev; } } else { assert(block == fgFirstBB); assert(block != fgLastBB); assert((fgFirstBBScratch == nullptr) || (fgFirstBBScratch == fgFirstBB)); fgFirstBB = block->bbNext; fgFirstBB->bbPrev = nullptr; if (fgFirstBBScratch != nullptr) { #ifdef DEBUG // We had created an initial scratch BB, but now we're deleting it. if (verbose) { printf("Unlinking scratch " FMT_BB "\n", block->bbNum); } #endif // DEBUG fgFirstBBScratch = nullptr; } } } /***************************************************************************************************** * * Function called to unlink basic block range [bBeg .. bEnd] from the basic block list. * * 'bBeg' can't be the first block. */ void Compiler::fgUnlinkRange(BasicBlock* bBeg, BasicBlock* bEnd) { assert(bBeg != nullptr); assert(bEnd != nullptr); BasicBlock* bPrev = bBeg->bbPrev; assert(bPrev != nullptr); // Can't unlink a range starting with the first block bPrev->setNext(bEnd->bbNext); /* If we removed the last block in the method then update fgLastBB */ if (fgLastBB == bEnd) { fgLastBB = bPrev; noway_assert(fgLastBB->bbNext == nullptr); } // If bEnd was the first Cold basic block update fgFirstColdBlock if (fgFirstColdBlock == bEnd) { fgFirstColdBlock = bPrev->bbNext; } #if defined(FEATURE_EH_FUNCLETS) #ifdef DEBUG // You can't unlink a range that includes the first funclet block. A range certainly // can't cross the non-funclet/funclet region. And you can't unlink the first block // of the first funclet with this, either. (If that's necessary, it could be allowed // by updating fgFirstFuncletBB to bEnd->bbNext.) for (BasicBlock* tempBB = bBeg; tempBB != bEnd->bbNext; tempBB = tempBB->bbNext) { assert(tempBB != fgFirstFuncletBB); } #endif // DEBUG #endif // FEATURE_EH_FUNCLETS } /***************************************************************************************************** * * Function called to remove a basic block */ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) { /* The block has to be either unreachable or empty */ PREFIX_ASSUME(block != nullptr); BasicBlock* bPrev = block->bbPrev; JITDUMP("fgRemoveBlock " FMT_BB ", unreachable=%s\n", block->bbNum, dspBool(unreachable)); // If we've cached any mappings from switch blocks to SwitchDesc's (which contain only the // *unique* successors of the switch block), invalidate that cache, since an entry in one of // the SwitchDescs might be removed. InvalidateUniqueSwitchSuccMap(); noway_assert((block == fgFirstBB) || (bPrev && (bPrev->bbNext == block))); noway_assert(!(block->bbFlags & BBF_DONT_REMOVE)); // Should never remove a genReturnBB, as we might have special hookups there. 
noway_assert(block != genReturnBB); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't remove a finally target assert(!(block->bbFlags & BBF_FINALLY_TARGET)); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (unreachable) { PREFIX_ASSUME(bPrev != nullptr); fgUnreachableBlock(block); #if defined(FEATURE_EH_FUNCLETS) // If block was the fgFirstFuncletBB then set fgFirstFuncletBB to block->bbNext if (block == fgFirstFuncletBB) { fgFirstFuncletBB = block->bbNext; } #endif // FEATURE_EH_FUNCLETS if (bPrev->bbJumpKind == BBJ_CALLFINALLY) { // bPrev CALL becomes RETLESS as the BBJ_ALWAYS block is unreachable bPrev->bbFlags |= BBF_RETLESS_CALL; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) NO_WAY("No retless call finally blocks; need unwind target instead"); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else if (bPrev->bbJumpKind == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext && !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && (block->bbNext != fgFirstColdBlock)) { // previous block is a BBJ_ALWAYS to the next block: change to BBJ_NONE. // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), // because that would violate our invariant that BBJ_CALLFINALLY blocks are followed by // BBJ_ALWAYS blocks. bPrev->bbJumpKind = BBJ_NONE; } // If this is the first Cold basic block update fgFirstColdBlock if (block == fgFirstColdBlock) { fgFirstColdBlock = block->bbNext; } /* Unlink this block from the bbNext chain */ fgUnlinkBlock(block); /* At this point the bbPreds and bbRefs had better be zero */ noway_assert((block->bbRefs == 0) && (block->bbPreds == nullptr)); /* A BBJ_CALLFINALLY is usually paired with a BBJ_ALWAYS. * If we delete such a BBJ_CALLFINALLY we also delete the BBJ_ALWAYS */ if (block->isBBCallAlwaysPair()) { BasicBlock* leaveBlk = block->bbNext; noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; leaveBlk->bbRefs = 0; leaveBlk->bbPreds = nullptr; fgRemoveBlock(leaveBlk, /* unreachable */ true); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) fgClearFinallyTargetBit(leaveBlk->bbJumpDest); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else if (block->bbJumpKind == BBJ_RETURN) { fgRemoveReturnBlock(block); } } else // block is empty { noway_assert(block->isEmpty()); // The block cannot follow a non-retless BBJ_CALLFINALLY (because we don't know who may jump to it). noway_assert(!block->isBBCallAlwaysPairTail()); /* This cannot be the last basic block */ noway_assert(block != fgLastBB); #ifdef DEBUG if (verbose) { printf("Removing empty " FMT_BB "\n", block->bbNum); } #endif // DEBUG #ifdef DEBUG /* Some extra checks for the empty case */ switch (block->bbJumpKind) { case BBJ_NONE: break; case BBJ_ALWAYS: /* Do not remove a block that jumps to itself - used for while (true){} */ noway_assert(block->bbJumpDest != block); /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ noway_assert(bPrev && bPrev->bbJumpKind == BBJ_NONE); break; default: noway_assert(!"Empty block of this type cannot be removed!"); break; } #endif // DEBUG noway_assert(block->KindIs(BBJ_NONE, BBJ_ALWAYS)); /* Who is the "real" successor of this block? 
*/ BasicBlock* succBlock; if (block->bbJumpKind == BBJ_ALWAYS) { succBlock = block->bbJumpDest; } else { succBlock = block->bbNext; } bool skipUnmarkLoop = false; // If block is the backedge for a loop and succBlock precedes block // then the succBlock becomes the new LOOP HEAD // NOTE: there's an assumption here that the blocks are numbered in increasing bbNext order. // NOTE 2: if fgDomsComputed is false, then we can't check reachability. However, if this is // the case, then the loop structures probably are also invalid, and shouldn't be used. This // can be the case late in compilation (such as Lower), where remnants of earlier created // structures exist, but haven't been maintained. if (block->isLoopHead() && (succBlock->bbNum <= block->bbNum)) { succBlock->bbFlags |= BBF_LOOP_HEAD; if (block->isLoopAlign()) { loopAlignCandidates++; succBlock->bbFlags |= BBF_LOOP_ALIGN; JITDUMP("Propagating LOOP_ALIGN flag from " FMT_BB " to " FMT_BB " for " FMT_LP "\n ", block->bbNum, succBlock->bbNum, block->bbNatLoopNum); } if (fgDomsComputed && fgReachable(succBlock, block)) { // Mark all the reachable blocks between 'succBlock' and 'bPrev' optScaleLoopBlocks(succBlock, bPrev); } } else if (succBlock->isLoopHead() && bPrev && (succBlock->bbNum <= bPrev->bbNum)) { skipUnmarkLoop = true; } // If this is the first Cold basic block update fgFirstColdBlock if (block == fgFirstColdBlock) { fgFirstColdBlock = block->bbNext; } #if defined(FEATURE_EH_FUNCLETS) // Update fgFirstFuncletBB if necessary if (block == fgFirstFuncletBB) { fgFirstFuncletBB = block->bbNext; } #endif // FEATURE_EH_FUNCLETS /* First update the loop table and bbWeights */ optUpdateLoopsBeforeRemoveBlock(block, skipUnmarkLoop); // Update successor block start IL offset, if empty predecessor // covers the immediately preceding range. if ((block->bbCodeOffsEnd == succBlock->bbCodeOffs) && (block->bbCodeOffs != BAD_IL_OFFSET)) { assert(block->bbCodeOffs <= succBlock->bbCodeOffs); succBlock->bbCodeOffs = block->bbCodeOffs; } /* Remove the block */ if (bPrev == nullptr) { /* special case if this is the first BB */ noway_assert(block == fgFirstBB); /* Must be a fall through to next block */ noway_assert(block->bbJumpKind == BBJ_NONE); /* old block no longer gets the extra ref count for being the first block */ block->bbRefs--; succBlock->bbRefs++; } /* Update bbRefs and bbPreds. * All blocks jumping to 'block' now jump to 'succBlock'. * First, remove 'block' from the predecessor list of succBlock. */ fgRemoveRefPred(succBlock, block); for (flowList* const pred : block->PredEdges()) { BasicBlock* predBlock = pred->getBlock(); /* Are we changing a loop backedge into a forward jump? */ if (block->isLoopHead() && (predBlock->bbNum >= block->bbNum) && (predBlock->bbNum <= succBlock->bbNum)) { /* First update the loop table and bbWeights */ optUpdateLoopsBeforeRemoveBlock(predBlock); } /* If predBlock is a new predecessor, then add it to succBlock's predecessor's list. 
*/ if (predBlock->bbJumpKind != BBJ_SWITCH) { // Even if the pred is not a switch, we could have a conditional branch // to the fallthrough, so duplicate there could be preds for (unsigned i = 0; i < pred->flDupCount; i++) { fgAddRefPred(succBlock, predBlock); } } /* change all jumps to the removed block */ switch (predBlock->bbJumpKind) { default: noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()"); break; case BBJ_NONE: noway_assert(predBlock == bPrev); PREFIX_ASSUME(bPrev != nullptr); /* In the case of BBJ_ALWAYS we have to change the type of its predecessor */ if (block->bbJumpKind == BBJ_ALWAYS) { /* bPrev now becomes a BBJ_ALWAYS */ bPrev->bbJumpKind = BBJ_ALWAYS; bPrev->bbJumpDest = succBlock; } break; case BBJ_COND: /* The links for the direct predecessor case have already been updated above */ if (predBlock->bbJumpDest != block) { break; } /* Check if both side of the BBJ_COND now jump to the same block */ if (predBlock->bbNext == succBlock) { // Make sure we are replacing "block" with "succBlock" in predBlock->bbJumpDest. noway_assert(predBlock->bbJumpDest == block); predBlock->bbJumpDest = succBlock; fgRemoveConditionalJump(predBlock); break; } /* Fall through for the jump case */ FALLTHROUGH; case BBJ_CALLFINALLY: case BBJ_ALWAYS: case BBJ_EHCATCHRET: noway_assert(predBlock->bbJumpDest == block); predBlock->bbJumpDest = succBlock; break; case BBJ_SWITCH: // Change any jumps from 'predBlock' (a BBJ_SWITCH) to 'block' to jump to 'succBlock' // // For the jump targets of 'predBlock' (a BBJ_SWITCH) that jump to 'block' // remove the old predecessor at 'block' from 'predBlock' and // add the new predecessor at 'succBlock' from 'predBlock' // fgReplaceSwitchJumpTarget(predBlock, succBlock, block); break; } } fgUnlinkBlock(block); block->bbFlags |= BBF_REMOVED; } // If this was marked for alignment, remove it block->unmarkLoopAlign(this DEBUG_ARG("Removed block")); if (bPrev != nullptr) { switch (bPrev->bbJumpKind) { case BBJ_CALLFINALLY: // If prev is a BBJ_CALLFINALLY it better be marked as RETLESS noway_assert(bPrev->bbFlags & BBF_RETLESS_CALL); break; case BBJ_ALWAYS: // Check for branch to next block. Just make sure the BBJ_ALWAYS block is not // part of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. We do this here and don't rely on fgUpdateFlowGraph // because we can be called by ComputeDominators and it expects it to remove this jump to // the next block. This is the safest fix. We should remove all this BBJ_CALLFINALLY/BBJ_ALWAYS // pairing. 
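                // Roughly, such a pair looks like:
                //
                //    BBJ_CALLFINALLY --> finally handler
                //    BBJ_ALWAYS      --> continuation after the finally
                //
                // and the trailing BBJ_ALWAYS must not be folded into a BBJ_NONE here.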
if ((bPrev->bbJumpDest == bPrev->bbNext) && !fgInDifferentRegions(bPrev, bPrev->bbJumpDest)) // We don't remove a branch from Hot -> Cold { if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail()) { // It's safe to change the jump type bPrev->bbJumpKind = BBJ_NONE; } } break; case BBJ_COND: /* Check for branch to next block */ if (bPrev->bbJumpDest == bPrev->bbNext) { fgRemoveConditionalJump(bPrev); } break; default: break; } ehUpdateForDeletedBlock(block); } } /***************************************************************************** * * Function called to connect to block that previously had a fall through */ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) { BasicBlock* jmpBlk = nullptr; /* If bSrc is non-NULL */ if (bSrc != nullptr) { /* If bSrc falls through to a block that is not bDst, we will insert a jump to bDst */ if (bSrc->bbFallsThrough() && (bSrc->bbNext != bDst)) { switch (bSrc->bbJumpKind) { case BBJ_NONE: bSrc->bbJumpKind = BBJ_ALWAYS; bSrc->bbJumpDest = bDst; #ifdef DEBUG if (verbose) { printf("Block " FMT_BB " ended with a BBJ_NONE, Changed to an unconditional jump to " FMT_BB "\n", bSrc->bbNum, bSrc->bbJumpDest->bbNum); } #endif break; case BBJ_CALLFINALLY: case BBJ_COND: // Add a new block after bSrc which jumps to 'bDst' jmpBlk = fgNewBBafter(BBJ_ALWAYS, bSrc, true); if (fgComputePredsDone) { fgAddRefPred(jmpBlk, bSrc, fgGetPredForBlock(bDst, bSrc)); } // Record the loop number in the new block jmpBlk->bbNatLoopNum = bSrc->bbNatLoopNum; // When adding a new jmpBlk we will set the bbWeight and bbFlags // if (fgHaveValidEdgeWeights && fgHaveProfileData()) { noway_assert(fgComputePredsDone); flowList* newEdge = fgGetPredForBlock(jmpBlk, bSrc); jmpBlk->bbWeight = (newEdge->edgeWeightMin() + newEdge->edgeWeightMax()) / 2; if (bSrc->bbWeight == BB_ZERO_WEIGHT) { jmpBlk->bbWeight = BB_ZERO_WEIGHT; } if (jmpBlk->bbWeight == BB_ZERO_WEIGHT) { jmpBlk->bbFlags |= BBF_RUN_RARELY; } weight_t weightDiff = (newEdge->edgeWeightMax() - newEdge->edgeWeightMin()); weight_t slop = BasicBlock::GetSlopFraction(bSrc, bDst); // // If the [min/max] values for our edge weight is within the slop factor // then we will set the BBF_PROF_WEIGHT flag for the block // if (weightDiff <= slop) { jmpBlk->bbFlags |= BBF_PROF_WEIGHT; } } else { // We set the bbWeight to the smaller of bSrc->bbWeight or bDst->bbWeight if (bSrc->bbWeight < bDst->bbWeight) { jmpBlk->bbWeight = bSrc->bbWeight; jmpBlk->bbFlags |= (bSrc->bbFlags & BBF_RUN_RARELY); } else { jmpBlk->bbWeight = bDst->bbWeight; jmpBlk->bbFlags |= (bDst->bbFlags & BBF_RUN_RARELY); } } jmpBlk->bbJumpDest = bDst; if (fgComputePredsDone) { fgReplacePred(bDst, bSrc, jmpBlk); } else { jmpBlk->bbFlags |= BBF_IMPORTED; } #ifdef DEBUG if (verbose) { printf("Added an unconditional jump to " FMT_BB " after block " FMT_BB "\n", jmpBlk->bbJumpDest->bbNum, bSrc->bbNum); } #endif // DEBUG break; default: noway_assert(!"Unexpected bbJumpKind"); break; } } else { // If bSrc is an unconditional branch to the next block // then change it to a BBJ_NONE block // if ((bSrc->bbJumpKind == BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (bSrc->bbJumpDest == bSrc->bbNext)) { bSrc->bbJumpKind = BBJ_NONE; #ifdef DEBUG if (verbose) { printf("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB " into a BBJ_NONE block\n", bSrc->bbNum, bSrc->bbNext->bbNum); } #endif // DEBUG } } } return jmpBlk; } //------------------------------------------------------------------------ // fgRenumberBlocks: update block bbNums 
to reflect bbNext order // // Returns: // true if blocks were renumbered or maxBBNum was updated. // // Notes: // Walk the flow graph, reassign block numbers to keep them in ascending order. // Return 'true' if any renumbering was actually done, OR if we change the // maximum number of assigned basic blocks (this can happen if we do inlining, // create a new, high-numbered block, then that block goes away. We go to // renumber the blocks, none of them actually change number, but we shrink the // maximum assigned block number. This affects the block set epoch). // // As a consequence of renumbering, block pred lists may need to be reordered. // bool Compiler::fgRenumberBlocks() { // If we renumber the blocks the dominator information will be out-of-date if (fgDomsComputed) { noway_assert(!"Can't call Compiler::fgRenumberBlocks() when fgDomsComputed==true"); } #ifdef DEBUG if (verbose) { printf("\n*************** Before renumbering the basic blocks\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool renumbered = false; bool newMaxBBNum = false; BasicBlock* block; unsigned numStart = 1 + (compIsForInlining() ? impInlineInfo->InlinerCompiler->fgBBNumMax : 0); unsigned num; for (block = fgFirstBB, num = numStart; block != nullptr; block = block->bbNext, num++) { noway_assert((block->bbFlags & BBF_REMOVED) == 0); if (block->bbNum != num) { renumbered = true; #ifdef DEBUG if (verbose) { printf("Renumber " FMT_BB " to " FMT_BB "\n", block->bbNum, num); } #endif // DEBUG block->bbNum = num; } if (block->bbNext == nullptr) { fgLastBB = block; fgBBcount = num - numStart + 1; if (compIsForInlining()) { if (impInlineInfo->InlinerCompiler->fgBBNumMax != num) { impInlineInfo->InlinerCompiler->fgBBNumMax = num; newMaxBBNum = true; } } else { if (fgBBNumMax != num) { fgBBNumMax = num; newMaxBBNum = true; } } } } // If we renumbered, then we may need to reorder some pred lists. // if (renumbered && fgComputePredsDone) { for (BasicBlock* const block : Blocks()) { block->ensurePredListOrder(this); } } #ifdef DEBUG if (verbose) { printf("\n*************** After renumbering the basic blocks\n"); if (renumbered) { fgDispBasicBlocks(); fgDispHandlerTab(); } else { printf("=============== No blocks renumbered!\n"); } } #endif // DEBUG // Now update the BlockSet epoch, which depends on the block numbers. // If any blocks have been renumbered then create a new BlockSet epoch. // Even if we have not renumbered any blocks, we might still need to force // a new BlockSet epoch, for one of several reasons. If there are any new // blocks with higher numbers than the former maximum numbered block, then we // need a new epoch with a new size matching the new largest numbered block. // Also, if the number of blocks is different from the last time we set the // BlockSet epoch, then we need a new epoch. This wouldn't happen if we // renumbered blocks after every block addition/deletion, but it might be // the case that we can change the number of blocks, then set the BlockSet // epoch without renumbering, then change the number of blocks again, then // renumber. if (renumbered || newMaxBBNum) { NewBasicBlockEpoch(); // The key in the unique switch successor map is dependent on the block number, so invalidate that cache. InvalidateUniqueSwitchSuccMap(); } else { EnsureBasicBlockEpoch(); } // Tell our caller if any blocks actually were renumbered. return renumbered || newMaxBBNum; } /***************************************************************************** * * Is the BasicBlock bJump a forward branch? 
* Optionally bSrc can be supplied to indicate that * bJump must be forward with respect to bSrc */ bool Compiler::fgIsForwardBranch(BasicBlock* bJump, BasicBlock* bSrc /* = NULL */) { bool result = false; if (bJump->KindIs(BBJ_COND, BBJ_ALWAYS)) { BasicBlock* bDest = bJump->bbJumpDest; BasicBlock* bTemp = (bSrc == nullptr) ? bJump : bSrc; while (true) { bTemp = bTemp->bbNext; if (bTemp == nullptr) { break; } if (bTemp == bDest) { result = true; break; } } } return result; } /***************************************************************************** * * Returns true if it is allowable (based upon the EH regions) * to place block bAfter immediately after bBefore. It is allowable * if the 'bBefore' and 'bAfter' blocks are in the exact same EH region. */ bool Compiler::fgEhAllowsMoveBlock(BasicBlock* bBefore, BasicBlock* bAfter) { return BasicBlock::sameEHRegion(bBefore, bAfter); } /***************************************************************************** * * Function called to move the range of blocks [bStart .. bEnd]. * The blocks are placed immediately after the insertAfterBlk. * fgFirstFuncletBB is not updated; that is the responsibility of the caller, if necessary. */ void Compiler::fgMoveBlocksAfter(BasicBlock* bStart, BasicBlock* bEnd, BasicBlock* insertAfterBlk) { /* We have decided to insert the block(s) after 'insertAfterBlk' */ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("Relocated block%s [" FMT_BB ".." FMT_BB "] inserted after " FMT_BB "%s\n", (bStart == bEnd) ? "" : "s", bStart->bbNum, bEnd->bbNum, insertAfterBlk->bbNum, (insertAfterBlk->bbNext == nullptr) ? " at the end of method" : ""); } #endif // DEBUG /* relink [bStart .. bEnd] into the flow graph */ bEnd->bbNext = insertAfterBlk->bbNext; if (insertAfterBlk->bbNext) { insertAfterBlk->bbNext->bbPrev = bEnd; } insertAfterBlk->setNext(bStart); /* If insertAfterBlk was fgLastBB then update fgLastBB */ if (insertAfterBlk == fgLastBB) { fgLastBB = bEnd; noway_assert(fgLastBB->bbNext == nullptr); } } /***************************************************************************** * * Function called to relocate a single range to the end of the method. * Only an entire consecutive region can be moved and it will be kept together. * Except for the first block, the range cannot have any blocks that jump into or out of the region. * When successful we return the bLast block which is the last block that we relocated. * When unsuccessful we return NULL. ============================================================= NOTE: This function can invalidate all pointers into the EH table, as well as change the size of the EH table! ============================================================= */ BasicBlock* Compiler::fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType) { INDEBUG(const char* reason = "None";) // Figure out the range of blocks we're going to move unsigned XTnum; EHblkDsc* HBtab; BasicBlock* bStart = nullptr; BasicBlock* bMiddle = nullptr; BasicBlock* bLast = nullptr; BasicBlock* bPrev = nullptr; #if defined(FEATURE_EH_FUNCLETS) // We don't support moving try regions... yet? noway_assert(relocateType == FG_RELOCATE_HANDLER); #endif // FEATURE_EH_FUNCLETS HBtab = ehGetDsc(regionIndex); if (relocateType == FG_RELOCATE_TRY) { bStart = HBtab->ebdTryBeg; bLast = HBtab->ebdTryLast; } else if (relocateType == FG_RELOCATE_HANDLER) { if (HBtab->HasFilter()) { // The filter and handler funclets must be moved together, and remain contiguous. 
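            // That is, we relocate the whole [filter .. handler] range:
            //
            //    bStart  = first block of the filter
            //    bMiddle = first block of the handler (immediately follows the filter)
            //    bLast   = last block of the handler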
bStart = HBtab->ebdFilter; bMiddle = HBtab->ebdHndBeg; bLast = HBtab->ebdHndLast; } else { bStart = HBtab->ebdHndBeg; bLast = HBtab->ebdHndLast; } } // Our range must contain either all rarely run blocks or all non-rarely run blocks bool inTheRange = false; bool validRange = false; BasicBlock* block; noway_assert(bStart != nullptr && bLast != nullptr); if (bStart == fgFirstBB) { INDEBUG(reason = "can not relocate first block";) goto FAILURE; } #if !defined(FEATURE_EH_FUNCLETS) // In the funclets case, we still need to set some information on the handler blocks if (bLast->bbNext == NULL) { INDEBUG(reason = "region is already at the end of the method";) goto FAILURE; } #endif // !FEATURE_EH_FUNCLETS // Walk the block list for this purpose: // 1. Verify that all the blocks in the range are either all rarely run or not rarely run. // When creating funclets, we ignore the run rarely flag, as we need to be able to move any blocks // in the range. CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(FEATURE_EH_FUNCLETS) bool isRare; isRare = bStart->isRunRarely(); #endif // !FEATURE_EH_FUNCLETS block = fgFirstBB; while (true) { if (block == bStart) { noway_assert(inTheRange == false); inTheRange = true; } else if (block == bLast->bbNext) { noway_assert(inTheRange == true); inTheRange = false; break; // we found the end, so we're done } if (inTheRange) { #if !defined(FEATURE_EH_FUNCLETS) // Unless all blocks are (not) run rarely we must return false. if (isRare != block->isRunRarely()) { INDEBUG(reason = "this region contains both rarely run and non-rarely run blocks";) goto FAILURE; } #endif // !FEATURE_EH_FUNCLETS validRange = true; } if (block == nullptr) { break; } block = block->bbNext; } // Ensure that bStart .. bLast defined a valid range noway_assert((validRange == true) && (inTheRange == false)); bPrev = bStart->bbPrev; noway_assert(bPrev != nullptr); // Can't move a range that includes the first block of the function. JITDUMP("Relocating %s range " FMT_BB ".." FMT_BB " (EH#%u) to end of BBlist\n", (relocateType == FG_RELOCATE_TRY) ? "try" : "handler", bStart->bbNum, bLast->bbNum, regionIndex); #ifdef DEBUG if (verbose) { fgDispBasicBlocks(); fgDispHandlerTab(); } #if !defined(FEATURE_EH_FUNCLETS) // This is really expensive, and quickly becomes O(n^n) with funclets // so only do it once after we've created them (see fgCreateFunclets) if (expensiveDebugCheckLevel >= 2) { fgDebugCheckBBlist(); } #endif #endif // DEBUG #if defined(FEATURE_EH_FUNCLETS) bStart->bbFlags |= BBF_FUNCLET_BEG; // Mark the start block of the funclet if (bMiddle != nullptr) { bMiddle->bbFlags |= BBF_FUNCLET_BEG; // Also mark the start block of a filter handler as a funclet } #endif // FEATURE_EH_FUNCLETS BasicBlock* bNext; bNext = bLast->bbNext; /* Temporarily unlink [bStart .. bLast] from the flow graph */ fgUnlinkRange(bStart, bLast); BasicBlock* insertAfterBlk; insertAfterBlk = fgLastBB; #if defined(FEATURE_EH_FUNCLETS) // There are several cases we need to consider when moving an EH range. // If moving a range X, we must consider its relationship to every other EH // range A in the table. Note that each entry in the table represents both // a protected region and a handler region (possibly including a filter region // that must live before and adjacent to the handler region), so we must // consider try and handler regions independently. These are the cases: // 1. 
A is completely contained within X (where "completely contained" means // that the 'begin' and 'last' parts of A are strictly between the 'begin' // and 'end' parts of X, and aren't equal to either, for example, they don't // share 'last' blocks). In this case, when we move X, A moves with it, and // the EH table doesn't need to change. // 2. X is completely contained within A. In this case, X gets extracted from A, // and the range of A shrinks, but because A is strictly within X, the EH // table doesn't need to change. // 3. A and X have exactly the same range. In this case, A is moving with X and // the EH table doesn't need to change. // 4. A and X share the 'last' block. There are two sub-cases: // (a) A is a larger range than X (such that the beginning of A precedes the // beginning of X): in this case, we are moving the tail of A. We set the // 'last' block of A to the the block preceding the beginning block of X. // (b) A is a smaller range than X. Thus, we are moving the entirety of A along // with X. In this case, nothing in the EH record for A needs to change. // 5. A and X share the 'beginning' block (but aren't the same range, as in #3). // This can never happen here, because we are only moving handler ranges (we don't // move try ranges), and handler regions cannot start at the beginning of a try // range or handler range and be a subset. // // Note that A and X must properly nest for the table to be well-formed. For example, // the beginning of A can't be strictly within the range of X (that is, the beginning // of A isn't shared with the beginning of X) and the end of A outside the range. for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { if (XTnum != regionIndex) // we don't need to update our 'last' pointer { if (HBtab->ebdTryLast == bLast) { // If we moved a set of blocks that were at the end of // a different try region then we may need to update ebdTryLast for (block = HBtab->ebdTryBeg; block != nullptr; block = block->bbNext) { if (block == bPrev) { // We were contained within it, so shrink its region by // setting its 'last' fgSetTryEnd(HBtab, bPrev); break; } else if (block == HBtab->ebdTryLast->bbNext) { // bPrev does not come after the TryBeg, thus we are larger, and // it is moving with us. break; } } } if (HBtab->ebdHndLast == bLast) { // If we moved a set of blocks that were at the end of // a different handler region then we must update ebdHndLast for (block = HBtab->ebdHndBeg; block != nullptr; block = block->bbNext) { if (block == bPrev) { fgSetHndEnd(HBtab, bPrev); break; } else if (block == HBtab->ebdHndLast->bbNext) { // bPrev does not come after the HndBeg break; } } } } } // end exception table iteration // Insert the block(s) we are moving after fgLastBlock fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); if (fgFirstFuncletBB == nullptr) // The funclet region isn't set yet { fgFirstFuncletBB = bStart; } else { assert(fgFirstFuncletBB != insertAfterBlk->bbNext); // We insert at the end, not at the beginning, of the funclet region. } // These asserts assume we aren't moving try regions (which we might need to do). Only // try regions can have fall through into or out of the region. 
noway_assert(!bPrev->bbFallsThrough()); // There can be no fall through into a filter or handler region noway_assert(!bLast->bbFallsThrough()); // There can be no fall through out of a handler region #ifdef DEBUG if (verbose) { printf("Create funclets: moved region\n"); fgDispHandlerTab(); } // We have to wait to do this until we've created all the additional regions // Because this relies on ebdEnclosingTryIndex and ebdEnclosingHndIndex #endif // DEBUG #else // !FEATURE_EH_FUNCLETS for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { if (XTnum == regionIndex) { // Don't update our handler's Last info continue; } if (HBtab->ebdTryLast == bLast) { // If we moved a set of blocks that were at the end of // a different try region then we may need to update ebdTryLast for (block = HBtab->ebdTryBeg; block != NULL; block = block->bbNext) { if (block == bPrev) { fgSetTryEnd(HBtab, bPrev); break; } else if (block == HBtab->ebdTryLast->bbNext) { // bPrev does not come after the TryBeg break; } } } if (HBtab->ebdHndLast == bLast) { // If we moved a set of blocks that were at the end of // a different handler region then we must update ebdHndLast for (block = HBtab->ebdHndBeg; block != NULL; block = block->bbNext) { if (block == bPrev) { fgSetHndEnd(HBtab, bPrev); break; } else if (block == HBtab->ebdHndLast->bbNext) { // bPrev does not come after the HndBeg break; } } } } // end exception table iteration // We have decided to insert the block(s) after fgLastBlock fgMoveBlocksAfter(bStart, bLast, insertAfterBlk); // If bPrev falls through, we will insert a jump to block fgConnectFallThrough(bPrev, bStart); // If bLast falls through, we will insert a jump to bNext fgConnectFallThrough(bLast, bNext); #endif // !FEATURE_EH_FUNCLETS goto DONE; FAILURE: #ifdef DEBUG if (verbose) { printf("*************** Failed fgRelocateEHRange(" FMT_BB ".." FMT_BB ") because %s\n", bStart->bbNum, bLast->bbNum, reason); } #endif // DEBUG bLast = nullptr; DONE: return bLast; } //------------------------------------------------------------------------ // fgMightHaveLoop: return true if there is a possibility that the method has a loop (a back edge is present). // This function doesn't depend on any previous loop computations, including predecessors. It looks for any // lexical back edge to a block previously seen in a forward walk of the block list. // // As it walks all blocks and all successors of each block (including EH successors), it is not cheap. // It returns as soon as any possible loop is discovered. // // Return Value: // true if there might be a loop // bool Compiler::fgMightHaveLoop() { // Don't use a BlockSet for this temporary bitset of blocks: we don't want to have to call EnsureBasicBlockEpoch() // and potentially change the block epoch. BitVecTraits blockVecTraits(fgBBNumMax + 1, this); BitVec blocksSeen(BitVecOps::MakeEmpty(&blockVecTraits)); for (BasicBlock* const block : Blocks()) { BitVecOps::AddElemD(&blockVecTraits, blocksSeen, block->bbNum); for (BasicBlock* const succ : block->GetAllSuccs(this)) { if (BitVecOps::IsMember(&blockVecTraits, blocksSeen, succ->bbNum)) { return true; } } } return false; } /***************************************************************************** * * Insert a BasicBlock before the given block. 
*/ BasicBlock* Compiler::fgNewBBbefore(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion) { // Create a new BasicBlock and chain it in BasicBlock* newBlk = bbNewBasicBlock(jumpKind); newBlk->bbFlags |= BBF_INTERNAL; fgInsertBBbefore(block, newBlk); newBlk->bbRefs = 0; if (newBlk->bbFallsThrough() && block->isRunRarely()) { newBlk->bbSetRunRarely(); } if (extendRegion) { fgExtendEHRegionBefore(block); } else { // When extendRegion is false the caller is responsible for setting these two values newBlk->setTryIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely newBlk->setHndIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely } // We assume that if the block we are inserting before is in the cold region, then this new // block will also be in the cold region. newBlk->bbFlags |= (block->bbFlags & BBF_COLD); return newBlk; } /***************************************************************************** * * Insert a BasicBlock after the given block. */ BasicBlock* Compiler::fgNewBBafter(BBjumpKinds jumpKind, BasicBlock* block, bool extendRegion) { // Create a new BasicBlock and chain it in BasicBlock* newBlk = bbNewBasicBlock(jumpKind); newBlk->bbFlags |= BBF_INTERNAL; fgInsertBBafter(block, newBlk); newBlk->bbRefs = 0; if (block->bbFallsThrough() && block->isRunRarely()) { newBlk->bbSetRunRarely(); } if (extendRegion) { fgExtendEHRegionAfter(block); } else { // When extendRegion is false the caller is responsible for setting these two values newBlk->setTryIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely newBlk->setHndIndex(MAX_XCPTN_INDEX); // Note: this is still a legal index, just unlikely } // If the new block is in the cold region (because the block we are inserting after // is in the cold region), mark it as such. newBlk->bbFlags |= (block->bbFlags & BBF_COLD); return newBlk; } /***************************************************************************** * Inserts basic block before existing basic block. * * If insertBeforeBlk is in the funclet region, then newBlk will be in the funclet region. * (If insertBeforeBlk is the first block of the funclet region, then 'newBlk' will be the * new first block of the funclet region.) */ void Compiler::fgInsertBBbefore(BasicBlock* insertBeforeBlk, BasicBlock* newBlk) { if (insertBeforeBlk->bbPrev) { fgInsertBBafter(insertBeforeBlk->bbPrev, newBlk); } else { newBlk->setNext(fgFirstBB); fgFirstBB = newBlk; newBlk->bbPrev = nullptr; } #if defined(FEATURE_EH_FUNCLETS) /* Update fgFirstFuncletBB if insertBeforeBlk is the first block of the funclet region. */ if (fgFirstFuncletBB == insertBeforeBlk) { fgFirstFuncletBB = newBlk; } #endif // FEATURE_EH_FUNCLETS } /***************************************************************************** * Inserts basic block after existing basic block. * * If insertBeforeBlk is in the funclet region, then newBlk will be in the funclet region. * (It can't be used to insert a block as the first block of the funclet region). */ void Compiler::fgInsertBBafter(BasicBlock* insertAfterBlk, BasicBlock* newBlk) { newBlk->bbNext = insertAfterBlk->bbNext; if (insertAfterBlk->bbNext) { insertAfterBlk->bbNext->bbPrev = newBlk; } insertAfterBlk->bbNext = newBlk; newBlk->bbPrev = insertAfterBlk; if (fgLastBB == insertAfterBlk) { fgLastBB = newBlk; assert(fgLastBB->bbNext == nullptr); } } // We have two edges (bAlt => bCur) and (bCur => bNext). // // Returns true if the weight of (bAlt => bCur) // is greater than the weight of (bCur => bNext). 
// We compare the edge weights if we have valid edge weights // otherwise we compare blocks weights. // bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt) { // bCur can't be NULL and must be a fall through bbJumpKind noway_assert(bCur != nullptr); noway_assert(bCur->bbFallsThrough()); noway_assert(bAlt != nullptr); // We only handle the cases when bAlt is a BBJ_ALWAYS or a BBJ_COND if (!bAlt->KindIs(BBJ_ALWAYS, BBJ_COND)) { return false; } // if bAlt doesn't jump to bCur it can't be a better fall through than bCur if (bAlt->bbJumpDest != bCur) { return false; } // Currently bNext is the fall through for bCur BasicBlock* bNext = bCur->bbNext; noway_assert(bNext != nullptr); // We will set result to true if bAlt is a better fall through than bCur bool result; if (fgHaveValidEdgeWeights) { // We will compare the edge weight for our two choices flowList* edgeFromAlt = fgGetPredForBlock(bCur, bAlt); flowList* edgeFromCur = fgGetPredForBlock(bNext, bCur); noway_assert(edgeFromCur != nullptr); noway_assert(edgeFromAlt != nullptr); result = (edgeFromAlt->edgeWeightMin() > edgeFromCur->edgeWeightMax()); } else { if (bAlt->bbJumpKind == BBJ_ALWAYS) { // Our result is true if bAlt's weight is more than bCur's weight result = (bAlt->bbWeight > bCur->bbWeight); } else { noway_assert(bAlt->bbJumpKind == BBJ_COND); // Our result is true if bAlt's weight is more than twice bCur's weight result = (bAlt->bbWeight > (2 * bCur->bbWeight)); } } return result; } //------------------------------------------------------------------------ // Finds the block closest to endBlk in the range [startBlk..endBlk) after which a block can be // inserted easily. Note that endBlk cannot be returned; its predecessor is the last block that can // be returned. The new block will be put in an EH region described by the arguments regionIndex, // putInTryRegion, startBlk, and endBlk (explained below), so it must be legal to place to put the // new block after the insertion location block, give it the specified EH region index, and not break // EH nesting rules. This function is careful to choose a block in the correct EH region. However, // it assumes that the new block can ALWAYS be placed at the end (just before endBlk). That means // that the caller must ensure that is true. // // Below are the possible cases for the arguments to this method: // 1. putInTryRegion == true and regionIndex > 0: // Search in the try region indicated by regionIndex. // 2. putInTryRegion == false and regionIndex > 0: // a. If startBlk is the first block of a filter and endBlk is the block after the end of the // filter (that is, the startBlk and endBlk match a filter bounds exactly), then choose a // location within this filter region. (Note that, due to IL rules, filters do not have any // EH nested within them.) Otherwise, filters are skipped. // b. Else, search in the handler region indicated by regionIndex. // 3. regionIndex = 0: // Search in the entire main method, excluding all EH regions. In this case, putInTryRegion must be true. // // This method makes sure to find an insertion point which would not cause the inserted block to // be put inside any inner try/filter/handler regions. // // The actual insertion occurs after the returned block. Note that the returned insertion point might // be the last block of a more nested EH region, because the new block will be inserted after the insertion // point, and will not extend the more nested EH region. 
For example: // // try3 try2 try1 // |--- | | BB01 // | |--- | BB02 // | | |--- BB03 // | | | BB04 // | |--- |--- BB05 // | BB06 // |----------------- BB07 // // for regionIndex==try3, putInTryRegion==true, we might return BB05, even though BB05 will have a try index // for try1 (the most nested 'try' region the block is in). That's because when we insert after BB05, the new // block will be in the correct, desired EH region, since try1 and try2 regions will not be extended to include // the inserted block. Furthermore, for regionIndex==try2, putInTryRegion==true, we can also return BB05. In this // case, when the new block is inserted, the try1 region remains the same, but we need extend region 'try2' to // include the inserted block. (We also need to check all parent regions as well, just in case any parent regions // also end on the same block, in which case we would also need to extend the parent regions. This is standard // procedure when inserting a block at the end of an EH region.) // // If nearBlk is non-nullptr then we return the closest block after nearBlk that will work best. // // We try to find a block in the appropriate region that is not a fallthrough block, so we can insert after it // without the need to insert a jump around the inserted block. // // Note that regionIndex is numbered the same as BasicBlock::bbTryIndex and BasicBlock::bbHndIndex, that is, "0" is // "main method" and otherwise is +1 from normal, so we can call, e.g., ehGetDsc(tryIndex - 1). // // Arguments: // regionIndex - the region index where the new block will be inserted. Zero means entire method; // non-zero means either a "try" or a "handler" region, depending on what putInTryRegion says. // putInTryRegion - 'true' to put the block in the 'try' region corresponding to 'regionIndex', 'false' // to put the block in the handler region. Should be 'true' if regionIndex==0. // startBlk - start block of range to search. // endBlk - end block of range to search (don't include this block in the range). Can be nullptr to indicate // the end of the function. // nearBlk - If non-nullptr, try to find an insertion location closely after this block. If nullptr, we insert // at the best location found towards the end of the acceptable block range. // jumpBlk - When nearBlk is set, this can be set to the block which jumps to bNext->bbNext (TODO: need to review // this?) // runRarely - true if the block being inserted is expected to be rarely run. This helps determine // the best place to put the new block, by putting in a place that has the same 'rarely run' characteristic. // // Return Value: // A block with the desired characteristics, so the new block will be inserted after this one. // If there is no suitable location, return nullptr. This should basically never happen. // BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, bool putInTryRegion, BasicBlock* startBlk, BasicBlock* endBlk, BasicBlock* nearBlk, BasicBlock* jumpBlk, bool runRarely) { noway_assert(startBlk != nullptr); noway_assert(startBlk != endBlk); noway_assert((regionIndex == 0 && putInTryRegion) || // Search in the main method (putInTryRegion && regionIndex > 0 && startBlk->bbTryIndex == regionIndex) || // Search in the specified try region (!putInTryRegion && regionIndex > 0 && startBlk->bbHndIndex == regionIndex)); // Search in the specified handler region #ifdef DEBUG // Assert that startBlk precedes endBlk in the block list. 
// We don't want to use bbNum to assert this condition, as we cannot depend on the block numbers being // sequential at all times. for (BasicBlock* b = startBlk; b != endBlk; b = b->bbNext) { assert(b != nullptr); // We reached the end of the block list, but never found endBlk. } #endif // DEBUG JITDUMP("fgFindInsertPoint(regionIndex=%u, putInTryRegion=%s, startBlk=" FMT_BB ", endBlk=" FMT_BB ", nearBlk=" FMT_BB ", " "jumpBlk=" FMT_BB ", runRarely=%s)\n", regionIndex, dspBool(putInTryRegion), startBlk->bbNum, (endBlk == nullptr) ? 0 : endBlk->bbNum, (nearBlk == nullptr) ? 0 : nearBlk->bbNum, (jumpBlk == nullptr) ? 0 : jumpBlk->bbNum, dspBool(runRarely)); bool insertingIntoFilter = false; if (!putInTryRegion) { EHblkDsc* const dsc = ehGetDsc(regionIndex - 1); insertingIntoFilter = dsc->HasFilter() && (startBlk == dsc->ebdFilter) && (endBlk == dsc->ebdHndBeg); } bool reachedNear = false; // Have we reached 'nearBlk' in our search? If not, we'll keep searching. bool inFilter = false; // Are we in a filter region that we need to skip? BasicBlock* bestBlk = nullptr; // Set to the best insertion point we've found so far that meets all the EH requirements. BasicBlock* goodBlk = nullptr; // Set to an acceptable insertion point that we'll use if we don't find a 'best' option. BasicBlock* blk; if (nearBlk != nullptr) { // Does the nearBlk precede the startBlk? for (blk = nearBlk; blk != nullptr; blk = blk->bbNext) { if (blk == startBlk) { reachedNear = true; break; } else if (blk == endBlk) { break; } } } for (blk = startBlk; blk != endBlk; blk = blk->bbNext) { // The only way (blk == nullptr) could be true is if the caller passed an endBlk that preceded startBlk in the // block list, or if endBlk isn't in the block list at all. In DEBUG, we'll instead hit the similar // well-formedness assert earlier in this function. noway_assert(blk != nullptr); if (blk == nearBlk) { reachedNear = true; } if (blk->bbCatchTyp == BBCT_FILTER) { // Record the fact that we entered a filter region, so we don't insert into filters... // Unless the caller actually wanted the block inserted in this exact filter region. if (!insertingIntoFilter || (blk != startBlk)) { inFilter = true; } } else if (blk->bbCatchTyp == BBCT_FILTER_HANDLER) { // Record the fact that we exited a filter region. inFilter = false; } // Don't insert a block inside this filter region. if (inFilter) { continue; } // Note that the new block will be inserted AFTER "blk". We check to make sure that doing so // would put the block in the correct EH region. We make an assumption here that you can // ALWAYS insert the new block before "endBlk" (that is, at the end of the search range) // and be in the correct EH region. This is must be guaranteed by the caller (as it is by // fgNewBBinRegion(), which passes the search range as an exact EH region block range). // Because of this assumption, we only check the EH information for blocks before the last block. if (blk->bbNext != endBlk) { // We are in the middle of the search range. We can't insert the new block in // an inner try or handler region. We can, however, set the insertion // point to the last block of an EH try/handler region, if the enclosing // region is the region we wish to insert in. (Since multiple regions can // end at the same block, we need to search outwards, checking that the // block is the last block of every EH region out to the region we want // to insert in.) 
This is especially useful for putting a call-to-finally // block on AMD64 immediately after its corresponding 'try' block, so in the // common case, we'll just fall through to it. For example: // // BB01 // BB02 -- first block of try // BB03 // BB04 -- last block of try // BB05 -- first block of finally // BB06 // BB07 -- last block of handler // BB08 // // Assume there is only one try/finally, so BB01 and BB08 are in the "main function". // For AMD64 call-to-finally, we'll want to insert the BBJ_CALLFINALLY in // the main function, immediately after BB04. This allows us to do that. if (!fgCheckEHCanInsertAfterBlock(blk, regionIndex, putInTryRegion)) { // Can't insert here. continue; } } // Look for an insert location: // 1. We want blocks that don't end with a fall through, // 2. Also, when blk equals nearBlk we may want to insert here. if (!blk->bbFallsThrough() || (blk == nearBlk)) { bool updateBestBlk = true; // We will probably update the bestBlk // If blk falls through then we must decide whether to use the nearBlk // hint if (blk->bbFallsThrough()) { noway_assert(blk == nearBlk); if (jumpBlk != nullptr) { updateBestBlk = fgIsBetterFallThrough(blk, jumpBlk); } else { updateBestBlk = false; } } // If we already have a best block, see if the 'runRarely' flags influences // our choice. If we want a runRarely insertion point, and the existing best // block is run rarely but the current block isn't run rarely, then don't // update the best block. // TODO-CQ: We should also handle the reverse case, where runRarely is false (we // want a non-rarely-run block), but bestBlock->isRunRarely() is true. In that // case, we should update the block, also. Probably what we want is: // (bestBlk->isRunRarely() != runRarely) && (blk->isRunRarely() == runRarely) if (updateBestBlk && (bestBlk != nullptr) && runRarely && bestBlk->isRunRarely() && !blk->isRunRarely()) { updateBestBlk = false; } if (updateBestBlk) { // We found a 'best' insertion location, so save it away. bestBlk = blk; // If we've reached nearBlk, we've satisfied all the criteria, // so we're done. if (reachedNear) { goto DONE; } // If we haven't reached nearBlk, keep looking for a 'best' location, just // in case we'll find one at or after nearBlk. If no nearBlk was specified, // we prefer inserting towards the end of the given range, so keep looking // for more acceptable insertion locations. } } // No need to update goodBlk after we have set bestBlk, but we could still find a better // bestBlk, so keep looking. if (bestBlk != nullptr) { continue; } // Set the current block as a "good enough" insertion point, if it meets certain criteria. // We'll return this block if we don't find a "best" block in the search range. The block // can't be a BBJ_CALLFINALLY of a BBJ_CALLFINALLY/BBJ_ALWAYS pair (since we don't want // to insert anything between these two blocks). Otherwise, we can use it. However, // if we'd previously chosen a BBJ_COND block, then we'd prefer the "good" block to be // something else. We keep updating it until we've reached the 'nearBlk', to push it as // close to endBlk as possible. if (!blk->isBBCallAlwaysPair()) { if (goodBlk == nullptr) { goodBlk = blk; } else if ((goodBlk->bbJumpKind == BBJ_COND) || (blk->bbJumpKind != BBJ_COND)) { if ((blk == nearBlk) || !reachedNear) { goodBlk = blk; } } } } // If we didn't find a non-fall_through block, then insert at the last good block. 
if (bestBlk == nullptr) { bestBlk = goodBlk; } DONE: #if defined(JIT32_GCENCODER) // If we are inserting into a filter and the best block is the end of the filter region, we need to // insert after its predecessor instead: the JIT32 GC encoding used by the x86 CLR ABI states that the // terminal block of a filter region is its exit block. If the filter region consists of a single block, // a new block cannot be inserted without either splitting the single block before inserting a new block // or inserting the new block before the single block and updating the filter description such that the // inserted block is marked as the entry block for the filter. Becuase this sort of split can be complex // (especially given that it must ensure that the liveness of the exception object is properly tracked), // we avoid this situation by never generating single-block filters on x86 (see impPushCatchArgOnStack). if (insertingIntoFilter && (bestBlk == endBlk->bbPrev)) { assert(bestBlk != startBlk); bestBlk = bestBlk->bbPrev; } #endif // defined(JIT32_GCENCODER) return bestBlk; } //------------------------------------------------------------------------ // Creates a new BasicBlock and inserts it in a specific EH region, given by 'tryIndex', 'hndIndex', and 'putInFilter'. // // If 'putInFilter' it true, then the block is inserted in the filter region given by 'hndIndex'. In this case, tryIndex // must be a less nested EH region (that is, tryIndex > hndIndex). // // Otherwise, the block is inserted in either the try region or the handler region, depending on which one is the inner // region. In other words, if the try region indicated by tryIndex is nested in the handler region indicated by // hndIndex, // then the new BB will be created in the try region. Vice versa. // // Note that tryIndex and hndIndex are numbered the same as BasicBlock::bbTryIndex and BasicBlock::bbHndIndex, that is, // "0" is "main method" and otherwise is +1 from normal, so we can call, e.g., ehGetDsc(tryIndex - 1). // // To be more specific, this function will create a new BB in one of the following 5 regions (if putInFilter is false): // 1. When tryIndex = 0 and hndIndex = 0: // The new BB will be created in the method region. // 2. When tryIndex != 0 and hndIndex = 0: // The new BB will be created in the try region indicated by tryIndex. // 3. When tryIndex == 0 and hndIndex != 0: // The new BB will be created in the handler region indicated by hndIndex. // 4. When tryIndex != 0 and hndIndex != 0 and tryIndex < hndIndex: // In this case, the try region is nested inside the handler region. Therefore, the new BB will be created // in the try region indicated by tryIndex. // 5. When tryIndex != 0 and hndIndex != 0 and tryIndex > hndIndex: // In this case, the handler region is nested inside the try region. Therefore, the new BB will be created // in the handler region indicated by hndIndex. // // Note that if tryIndex != 0 and hndIndex != 0 then tryIndex must not be equal to hndIndex (this makes sense because // if they are equal, you are asking to put the new block in both the try and handler, which is impossible). // // The BasicBlock will not be inserted inside an EH region that is more nested than the requested tryIndex/hndIndex // region (so the function is careful to skip more nested EH regions when searching for a place to put the new block). // // This function cannot be used to insert a block as the first block of any region. It always inserts a block after // an existing block in the given region. 
// // If nearBlk is nullptr, or the block is run rarely, then the new block is assumed to be run rarely. // // Arguments: // jumpKind - the jump kind of the new block to create. // tryIndex - the try region to insert the new block in, described above. This must be a number in the range // [0..compHndBBtabCount]. // hndIndex - the handler region to insert the new block in, described above. This must be a number in the range // [0..compHndBBtabCount]. // nearBlk - insert the new block closely after this block, if possible. If nullptr, put the new block anywhere // in the requested region. // putInFilter - put the new block in the filter region given by hndIndex, as described above. // runRarely - 'true' if the new block is run rarely. // insertAtEnd - 'true' if the block should be inserted at the end of the region. Note: this is currently only // implemented when inserting into the main function (not into any EH region). // // Return Value: // The new block. BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind, unsigned tryIndex, unsigned hndIndex, BasicBlock* nearBlk, bool putInFilter /* = false */, bool runRarely /* = false */, bool insertAtEnd /* = false */) { assert(tryIndex <= compHndBBtabCount); assert(hndIndex <= compHndBBtabCount); /* afterBlk is the block which will precede the newBB */ BasicBlock* afterBlk; // start and end limit for inserting the block BasicBlock* startBlk = nullptr; BasicBlock* endBlk = nullptr; bool putInTryRegion = true; unsigned regionIndex = 0; // First, figure out which region (the "try" region or the "handler" region) to put the newBB in. if ((tryIndex == 0) && (hndIndex == 0)) { assert(!putInFilter); endBlk = fgEndBBAfterMainFunction(); // don't put new BB in funclet region if (insertAtEnd || (nearBlk == nullptr)) { /* We'll just insert the block at the end of the method, before the funclets */ afterBlk = fgLastBBInMainFunction(); goto _FoundAfterBlk; } else { // We'll search through the entire method startBlk = fgFirstBB; } noway_assert(regionIndex == 0); } else { noway_assert(tryIndex > 0 || hndIndex > 0); PREFIX_ASSUME(tryIndex <= compHndBBtabCount); PREFIX_ASSUME(hndIndex <= compHndBBtabCount); // Decide which region to put in, the "try" region or the "handler" region. if (tryIndex == 0) { noway_assert(hndIndex > 0); putInTryRegion = false; } else if (hndIndex == 0) { noway_assert(tryIndex > 0); noway_assert(putInTryRegion); assert(!putInFilter); } else { noway_assert(tryIndex > 0 && hndIndex > 0 && tryIndex != hndIndex); putInTryRegion = (tryIndex < hndIndex); } if (putInTryRegion) { // Try region is the inner region. // In other words, try region must be nested inside the handler region. noway_assert(hndIndex == 0 || bbInHandlerRegions(hndIndex - 1, ehGetDsc(tryIndex - 1)->ebdTryBeg)); assert(!putInFilter); } else { // Handler region is the inner region. // In other words, handler region must be nested inside the try region. noway_assert(tryIndex == 0 || bbInTryRegions(tryIndex - 1, ehGetDsc(hndIndex - 1)->ebdHndBeg)); } // Figure out the start and end block range to search for an insertion location. Pick the beginning and // ending blocks of the target EH region (the 'endBlk' is one past the last block of the EH region, to make // loop iteration easier). Note that, after funclets have been created (for FEATURE_EH_FUNCLETS), // this linear block range will not include blocks of handlers for try/handler clauses nested within // this EH region, as those blocks have been extracted as funclets. 
That is ok, though, because we don't // want to insert a block in any nested EH region. if (putInTryRegion) { // We will put the newBB in the try region. EHblkDsc* ehDsc = ehGetDsc(tryIndex - 1); startBlk = ehDsc->ebdTryBeg; endBlk = ehDsc->ebdTryLast->bbNext; regionIndex = tryIndex; } else if (putInFilter) { // We will put the newBB in the filter region. EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1); startBlk = ehDsc->ebdFilter; endBlk = ehDsc->ebdHndBeg; regionIndex = hndIndex; } else { // We will put the newBB in the handler region. EHblkDsc* ehDsc = ehGetDsc(hndIndex - 1); startBlk = ehDsc->ebdHndBeg; endBlk = ehDsc->ebdHndLast->bbNext; regionIndex = hndIndex; } noway_assert(regionIndex > 0); } // Now find the insertion point. afterBlk = fgFindInsertPoint(regionIndex, putInTryRegion, startBlk, endBlk, nearBlk, nullptr, runRarely); _FoundAfterBlk:; /* We have decided to insert the block after 'afterBlk'. */ noway_assert(afterBlk != nullptr); JITDUMP("fgNewBBinRegion(jumpKind=%u, tryIndex=%u, hndIndex=%u, putInFilter=%s, runRarely=%s, insertAtEnd=%s): " "inserting after " FMT_BB "\n", jumpKind, tryIndex, hndIndex, dspBool(putInFilter), dspBool(runRarely), dspBool(insertAtEnd), afterBlk->bbNum); return fgNewBBinRegionWorker(jumpKind, afterBlk, regionIndex, putInTryRegion); } //------------------------------------------------------------------------ // Creates a new BasicBlock and inserts it in the same EH region as 'srcBlk'. // // See the implementation of fgNewBBinRegion() used by this one for more notes. // // Arguments: // jumpKind - the jump kind of the new block to create. // srcBlk - insert the new block in the same EH region as this block, and closely after it if possible. // // Return Value: // The new block. BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind, BasicBlock* srcBlk, bool runRarely /* = false */, bool insertAtEnd /* = false */) { assert(srcBlk != nullptr); const unsigned tryIndex = srcBlk->bbTryIndex; const unsigned hndIndex = srcBlk->bbHndIndex; bool putInFilter = false; // Check to see if we need to put the new block in a filter. We do if srcBlk is in a filter. // This can only be true if there is a handler index, and the handler region is more nested than the // try region (if any). This is because no EH regions can be nested within a filter. if (BasicBlock::ehIndexMaybeMoreNested(hndIndex, tryIndex)) { assert(hndIndex != 0); // If hndIndex is more nested, we must be in some handler! putInFilter = ehGetDsc(hndIndex - 1)->InFilterRegionBBRange(srcBlk); } return fgNewBBinRegion(jumpKind, tryIndex, hndIndex, srcBlk, putInFilter, runRarely, insertAtEnd); } //------------------------------------------------------------------------ // Creates a new BasicBlock and inserts it at the end of the function. // // See the implementation of fgNewBBinRegion() used by this one for more notes. // // Arguments: // jumpKind - the jump kind of the new block to create. // // Return Value: // The new block. BasicBlock* Compiler::fgNewBBinRegion(BBjumpKinds jumpKind) { return fgNewBBinRegion(jumpKind, 0, 0, nullptr, /* putInFilter */ false, /* runRarely */ false, /* insertAtEnd */ true); } //------------------------------------------------------------------------ // Creates a new BasicBlock, and inserts it after 'afterBlk'. // // The block cannot be inserted into a more nested try/handler region than that specified by 'regionIndex'. // (It is given exactly 'regionIndex'.) Thus, the parameters must be passed to ensure proper EH nesting // rules are followed. 
// // Arguments: // jumpKind - the jump kind of the new block to create. // afterBlk - insert the new block after this one. // regionIndex - the block will be put in this EH region. // putInTryRegion - If true, put the new block in the 'try' region corresponding to 'regionIndex', and // set its handler index to the most nested handler region enclosing that 'try' region. // Otherwise, put the block in the handler region specified by 'regionIndex', and set its 'try' // index to the most nested 'try' region enclosing that handler region. // // Return Value: // The new block. BasicBlock* Compiler::fgNewBBinRegionWorker(BBjumpKinds jumpKind, BasicBlock* afterBlk, unsigned regionIndex, bool putInTryRegion) { /* Insert the new block */ BasicBlock* afterBlkNext = afterBlk->bbNext; (void)afterBlkNext; // prevent "unused variable" error from GCC BasicBlock* newBlk = fgNewBBafter(jumpKind, afterBlk, false); if (putInTryRegion) { noway_assert(regionIndex <= MAX_XCPTN_INDEX); newBlk->bbTryIndex = (unsigned short)regionIndex; newBlk->bbHndIndex = bbFindInnermostHandlerRegionContainingTryRegion(regionIndex); } else { newBlk->bbTryIndex = bbFindInnermostTryRegionContainingHandlerRegion(regionIndex); noway_assert(regionIndex <= MAX_XCPTN_INDEX); newBlk->bbHndIndex = (unsigned short)regionIndex; } // We're going to compare for equal try regions (to handle the case of 'mutually protect' // regions). We need to save off the current try region, otherwise we might change it // before it gets compared later, thereby making future comparisons fail. BasicBlock* newTryBeg; BasicBlock* newTryLast; (void)ehInitTryBlockRange(newBlk, &newTryBeg, &newTryLast); unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Is afterBlk at the end of a try region? if (HBtab->ebdTryLast == afterBlk) { noway_assert(afterBlkNext == newBlk->bbNext); bool extendTryRegion = false; if (newBlk->hasTryIndex()) { // We're adding a block after the last block of some try region. Do // we extend the try region to include the block, or not? // If the try region is exactly the same as the try region // associated with the new block (based on the block's try index, // which represents the innermost try the block is a part of), then // we extend it. // If the try region is a "parent" try region -- an enclosing try region // that has the same last block as the new block's try region -- then // we also extend. For example: // try { // 1 // ... // try { // 2 // ... // } /* 2 */ } /* 1 */ // This example is meant to indicate that both try regions 1 and 2 end at // the same block, and we're extending 2. Thus, we must also extend 1. If we // only extended 2, we would break proper nesting. (Dev11 bug 137967) extendTryRegion = HBtab->ebdIsSameTry(newTryBeg, newTryLast) || bbInTryRegions(XTnum, newBlk); } // Does newBlk extend this try region? if (extendTryRegion) { // Yes, newBlk extends this try region // newBlk is the now the new try last block fgSetTryEnd(HBtab, newBlk); } } // Is afterBlk at the end of a handler region? if (HBtab->ebdHndLast == afterBlk) { noway_assert(afterBlkNext == newBlk->bbNext); // Does newBlk extend this handler region? bool extendHndRegion = false; if (newBlk->hasHndIndex()) { // We're adding a block after the last block of some handler region. Do // we extend the handler region to include the block, or not? 
// If the handler region is exactly the same as the handler region // associated with the new block (based on the block's handler index, // which represents the innermost handler the block is a part of), then // we extend it. // If the handler region is a "parent" handler region -- an enclosing // handler region that has the same last block as the new block's handler // region -- then we also extend. For example: // catch { // 1 // ... // catch { // 2 // ... // } /* 2 */ } /* 1 */ // This example is meant to indicate that both handler regions 1 and 2 end at // the same block, and we're extending 2. Thus, we must also extend 1. If we // only extended 2, we would break proper nesting. (Dev11 bug 372051) extendHndRegion = bbInHandlerRegions(XTnum, newBlk); } if (extendHndRegion) { // Yes, newBlk extends this handler region // newBlk is now the last block of the handler. fgSetHndEnd(HBtab, newBlk); } } } /* If afterBlk falls through, we insert a jump around newBlk */ fgConnectFallThrough(afterBlk, newBlk->bbNext); #ifdef DEBUG fgVerifyHandlerTab(); #endif return newBlk; } //------------------------------------------------------------------------ // fgUseThrowHelperBlocks: Determinate does compiler use throw helper blocks. // // Note: // For debuggable code, codegen will generate the 'throw' code inline. // Return Value: // true if 'throw' helper block should be created. bool Compiler::fgUseThrowHelperBlocks() { return !opts.compDbgCode; }
1
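The insert-point search shown above (fgFindInsertPoint) essentially scans a half-open range of blocks and prefers a block that does not fall through to its successor, settling for the last acceptable block otherwise. Below is a minimal standalone sketch of that idea; the Block type, the fallsThrough field, and the findInsertPoint name are illustrative stand-ins rather than the JIT's own types, and the real code layers on many more constraints (EH nesting, nearBlk hints, rarely-run weighting, call/always pairs).

#include <cstdio>

// Simplified stand-in for BasicBlock: a singly linked node that may fall
// through into its lexical successor.
struct Block
{
    int    num;
    bool   fallsThrough;
    Block* next;
};

// Walk the half-open range [startBlk, endBlk) and prefer the last block that
// does not fall through (no jump needs to be inserted around the new block).
// If every block falls through, settle for the last block scanned, mirroring
// the "good enough" fallback in the code above.
Block* findInsertPoint(Block* startBlk, Block* endBlk)
{
    Block* best = nullptr; // best candidate: does not fall through
    Block* good = nullptr; // fallback: last block in the range
    for (Block* blk = startBlk; blk != endBlk; blk = blk->next)
    {
        if (!blk->fallsThrough)
        {
            best = blk;
        }
        good = blk;
    }
    return (best != nullptr) ? best : good;
}

int main()
{
    Block b3{3, false, nullptr};
    Block b2{2, true, &b3};
    Block b1{1, true, &b2};
    Block* insertAfter = findInsertPoint(&b1, nullptr);
    std::printf("insert after BB%02d\n", insertAfter->num); // prints BB03
    return 0;
}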
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * Add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * Inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * Add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * Inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
./src/coreclr/jit/fgopt.cpp
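The PR description above states the new patchpoint placement policy only in prose. The sketch below illustrates one way an adaptive choice between backedge sources and targets could look; the names (PatchpointPlacement, BackEdge, placePatchpoint), the cutoff value, and the direction of the cutoff are assumptions made for illustration and are not taken from the actual change.

#include <cstdio>

// Hypothetical illustration of an adaptive OSR patchpoint placement policy:
// prefer the backedge source when the policy allows it and the source is a
// legal placement; otherwise fall back to the backedge target (loop head).
enum class PatchpointPlacement
{
    Targets, // patchpoint at each backedge target
    Sources, // patchpoint at each backedge source
    Adaptive // pick per-method based on the number of backedges
};

struct BackEdge
{
    int  sourceBlock;
    int  targetBlock;
    bool sourceIsLegalPlacement; // e.g. not in a spot where a patchpoint can't go
};

// Decide where to put the patchpoint for one backedge under a given policy.
// 'threshold' and the "few backedges -> prefer sources" direction are
// illustrative assumptions, not values taken from the JIT.
int placePatchpoint(const BackEdge& edge, PatchpointPlacement policy, int backEdgeCount, int threshold = 4)
{
    bool preferSource = (policy == PatchpointPlacement::Sources) ||
                        ((policy == PatchpointPlacement::Adaptive) && (backEdgeCount <= threshold));

    if (preferSource && edge.sourceIsLegalPlacement)
    {
        return edge.sourceBlock;
    }
    return edge.targetBlock; // fall back to placing at the target
}

int main()
{
    BackEdge edges[] = {{10, 2, true}, {15, 2, false}};
    for (const BackEdge& e : edges)
    {
        int block = placePatchpoint(e, PatchpointPlacement::Adaptive, 2);
        std::printf("patchpoint in BB%02d\n", block);
    }
    return 0;
}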
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "lower.h" // for LowerRange() // Flowgraph Optimization //------------------------------------------------------------------------ // fgDominate: Returns true if block `b1` dominates block `b2`. // // Arguments: // b1, b2 -- Two blocks to compare. // // Return Value: // true if `b1` dominates `b2`. If either b1 or b2 were created after dominators were calculated, // but the dominator information still exists, try to determine if we can make a statement about // b1 dominating b2 based on existing dominator information and other information, such as // predecessor lists or loop information. // // Assumptions: // -- Dominators have been calculated (`fgDomsComputed` is true). // bool Compiler::fgDominate(BasicBlock* b1, BasicBlock* b2) { noway_assert(fgDomsComputed); assert(!fgCheapPredsValid); // // If the fgModified flag is false then we made some modifications to // the flow graph, like adding a new block or changing a conditional branch // into an unconditional branch. // // We can continue to use the dominator and reachable information to // unmark loops as long as we haven't renumbered the blocks or we aren't // asking for information about a new block. // if (b2->bbNum > fgDomBBcount) { if (b1 == b2) { return true; } for (BasicBlock* const predBlock : b2->PredBlocks()) { if (!fgDominate(b1, predBlock)) { return false; } } return b2->bbPreds != nullptr; } if (b1->bbNum > fgDomBBcount) { // If b1 is a loop preheader (that was created after the dominators were calculated), // then it has a single successor that is the loop entry, and it is the only non-loop // predecessor of the loop entry. Thus, b1 dominates the loop entry and also dominates // what the loop entry dominates. if (b1->bbFlags & BBF_LOOP_PREHEADER) { BasicBlock* loopEntry = b1->GetUniqueSucc(); assert(loopEntry != nullptr); return fgDominate(loopEntry, b2); } // unknown dominators; err on the safe side and return false return false; } /* Check if b1 dominates b2 */ unsigned numA = b1->bbNum; noway_assert(numA <= fgDomBBcount); unsigned numB = b2->bbNum; noway_assert(numB <= fgDomBBcount); // What we want to ask here is basically if A is in the middle of the path from B to the root (the entry node) // in the dominator tree. Turns out that can be translated as: // // A dom B <-> preorder(A) <= preorder(B) && postorder(A) >= postorder(B) // // where the equality holds when you ask if A dominates itself. bool treeDom = fgDomTreePreOrder[numA] <= fgDomTreePreOrder[numB] && fgDomTreePostOrder[numA] >= fgDomTreePostOrder[numB]; return treeDom; } //------------------------------------------------------------------------ // fgReachable: Returns true if block `b1` can reach block `b2`. // // Arguments: // b1, b2 -- Two blocks to compare. // // Return Value: // true if `b1` can reach `b2` via some path. If either b1 or b2 were created after dominators were calculated, // but the dominator information still exists, try to determine if we can make a statement about // b1 reaching b2 based on existing reachability information and other information, such as // predecessor lists. // // Assumptions: // -- Dominators have been calculated (`fgDomsComputed` is true). // -- Reachability information has been calculated (`fgReachabilitySetsValid` is true). 
// bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2) { noway_assert(fgDomsComputed); assert(!fgCheapPredsValid); // // If the fgModified flag is false then we made some modifications to // the flow graph, like adding a new block or changing a conditional branch // into an unconditional branch. // // We can continue to use the dominator and reachable information to // unmark loops as long as we haven't renumbered the blocks or we aren't // asking for information about a new block // if (b2->bbNum > fgDomBBcount) { if (b1 == b2) { return true; } for (BasicBlock* const predBlock : b2->PredBlocks()) { if (fgReachable(b1, predBlock)) { return true; } } return false; } if (b1->bbNum > fgDomBBcount) { noway_assert(b1->KindIs(BBJ_NONE, BBJ_ALWAYS, BBJ_COND)); if (b1->KindIs(BBJ_NONE, BBJ_COND) && fgReachable(b1->bbNext, b2)) { return true; } if (b1->KindIs(BBJ_ALWAYS, BBJ_COND) && fgReachable(b1->bbJumpDest, b2)) { return true; } return false; } /* Check if b1 can reach b2 */ assert(fgReachabilitySetsValid); assert(BasicBlockBitSetTraits::GetSize(this) == fgDomBBcount + 1); return BlockSetOps::IsMember(this, b2->bbReach, b1->bbNum); } //------------------------------------------------------------------------ // fgUpdateChangedFlowGraph: Update changed flow graph information. // // If the flow graph has changed, we need to recompute various information if we want to use it again. // This does similar work to `fgComputeReachability`, but the caller can pick and choose what needs // to be recomputed if they know certain things do NOT need to be recomputed. // // Arguments: // computePreds -- `true` if we should recompute predecessors // computeDoms -- `true` if we should recompute dominators // computeReturnBlocks -- `true` if we should recompute the list of return blocks // computeLoops -- `true` if we should recompute the loop table // void Compiler::fgUpdateChangedFlowGraph(const bool computePreds, const bool computeDoms, const bool computeReturnBlocks, const bool computeLoops) { // We need to clear this so we don't hit an assert calling fgRenumberBlocks(). fgDomsComputed = false; if (computeReturnBlocks) { fgComputeReturnBlocks(); } JITDUMP("\nRenumbering the basic blocks for fgUpdateChangeFlowGraph\n"); fgRenumberBlocks(); if (computePreds) // This condition is only here until all phases don't require it. { fgComputePreds(); } fgComputeEnterBlocksSet(); fgComputeReachabilitySets(); if (computeDoms) { fgComputeDoms(); } if (computeLoops) { // Reset the loop info annotations and find the loops again. // Note: this is similar to `RecomputeLoopInfo`. optResetLoopInfo(); optSetBlockWeights(); optFindLoops(); } } //------------------------------------------------------------------------ // fgComputeReachabilitySets: Compute the bbReach sets. // // This can be called to recompute the bbReach sets after the flow graph changes, such as when the // number of BasicBlocks change (and thus, the BlockSet epoch changes). // // This also sets the BBF_GC_SAFE_POINT flag on blocks. // // TODO-Throughput: This algorithm consumes O(n^2) because we're using dense bitsets to // represent reachability. While this yields O(1) time queries, it bloats the memory usage // for large code. We can do better if we try to approach reachability by // computing the strongly connected components of the flow graph. That way we only need // linear memory to label every block with its SCC. // // Assumptions: // Assumes the predecessor lists are correct. 
// void Compiler::fgComputeReachabilitySets() { assert(fgComputePredsDone); assert(!fgCheapPredsValid); #ifdef DEBUG fgReachabilitySetsValid = false; #endif // DEBUG for (BasicBlock* const block : Blocks()) { // Initialize the per-block bbReach sets. It creates a new empty set, // because the block epoch could change since the previous initialization // and the old set could have wrong size. block->bbReach = BlockSetOps::MakeEmpty(this); /* Mark block as reaching itself */ BlockSetOps::AddElemD(this, block->bbReach, block->bbNum); } // Find the reachable blocks. Also, set BBF_GC_SAFE_POINT. bool change; BlockSet newReach(BlockSetOps::MakeEmpty(this)); do { change = false; for (BasicBlock* const block : Blocks()) { BlockSetOps::Assign(this, newReach, block->bbReach); bool predGcSafe = (block->bbPreds != nullptr); // Do all of our predecessor blocks have a GC safe bit? for (BasicBlock* const predBlock : block->PredBlocks()) { /* Union the predecessor's reachability set into newReach */ BlockSetOps::UnionD(this, newReach, predBlock->bbReach); if (!(predBlock->bbFlags & BBF_GC_SAFE_POINT)) { predGcSafe = false; } } if (predGcSafe) { block->bbFlags |= BBF_GC_SAFE_POINT; } if (!BlockSetOps::Equal(this, newReach, block->bbReach)) { BlockSetOps::Assign(this, block->bbReach, newReach); change = true; } } } while (change); #ifdef DEBUG if (verbose) { printf("\nAfter computing reachability sets:\n"); fgDispReach(); } fgReachabilitySetsValid = true; #endif // DEBUG } //------------------------------------------------------------------------ // fgComputeReturnBlocks: Compute the set of BBJ_RETURN blocks. // // Initialize `fgReturnBlocks` to a list of the BBJ_RETURN blocks in the function. // void Compiler::fgComputeReturnBlocks() { fgReturnBlocks = nullptr; for (BasicBlock* const block : Blocks()) { // If this is a BBJ_RETURN block, add it to our list of all BBJ_RETURN blocks. This list is only // used to find return blocks. if (block->bbJumpKind == BBJ_RETURN) { fgReturnBlocks = new (this, CMK_Reachability) BasicBlockList(block, fgReturnBlocks); } } fgReturnBlocksComputed = true; #ifdef DEBUG if (verbose) { printf("Return blocks:"); if (fgReturnBlocks == nullptr) { printf(" NONE"); } else { for (const BasicBlockList* bl = fgReturnBlocks; bl != nullptr; bl = bl->next) { printf(" " FMT_BB, bl->block->bbNum); } } printf("\n"); } #endif // DEBUG } //------------------------------------------------------------------------ // fgComputeEnterBlocksSet: Compute the entry blocks set. // // Initialize fgEnterBlks to the set of blocks for which we don't have explicit control // flow edges. These are the entry basic block and each of the EH handler blocks. // For ARM, also include the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair, // to avoid creating "retless" calls, since we need the BBJ_ALWAYS for the purpose // of unwinding, even if the call doesn't return (due to an explicit throw, for example). 
// void Compiler::fgComputeEnterBlocksSet() { #ifdef DEBUG fgEnterBlksSetValid = false; #endif // DEBUG fgEnterBlks = BlockSetOps::MakeEmpty(this); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) fgAlwaysBlks = BlockSetOps::MakeEmpty(this); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) /* Now set the entry basic block */ BlockSetOps::AddElemD(this, fgEnterBlks, fgFirstBB->bbNum); assert(fgFirstBB->bbNum == 1); if (compHndBBtabCount > 0) { /* Also 'or' in the handler basic blocks */ for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->HasFilter()) { BlockSetOps::AddElemD(this, fgEnterBlks, HBtab->ebdFilter->bbNum); } BlockSetOps::AddElemD(this, fgEnterBlks, HBtab->ebdHndBeg->bbNum); } } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // For ARM code, prevent creating retless calls by adding the BBJ_ALWAYS to the "fgAlwaysBlks" list. for (BasicBlock* const block : Blocks()) { if (block->bbJumpKind == BBJ_CALLFINALLY) { assert(block->isBBCallAlwaysPair()); // Don't remove the BBJ_ALWAYS block that is only here for the unwinder. BlockSetOps::AddElemD(this, fgAlwaysBlks, block->bbNext->bbNum); } } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG if (verbose) { printf("Enter blocks: "); BlockSetOps::Iter iter(this, fgEnterBlks); unsigned bbNum = 0; while (iter.NextElem(&bbNum)) { printf(FMT_BB " ", bbNum); } printf("\n"); } #endif // DEBUG #ifdef DEBUG fgEnterBlksSetValid = true; #endif // DEBUG } //------------------------------------------------------------------------ // fgRemoveUnreachableBlocks: Remove unreachable blocks. // // Some blocks (marked with BBF_DONT_REMOVE) can't be removed even if unreachable, in which case they // are converted to `throw` blocks. Internal throw helper blocks and the single return block (if any) // are never considered unreachable. // // Return Value: // Return true if changes were made that may cause additional blocks to be removable. // // Assumptions: // The reachability sets must be computed and valid. // bool Compiler::fgRemoveUnreachableBlocks() { assert(!fgCheapPredsValid); assert(fgReachabilitySetsValid); bool hasUnreachableBlocks = false; bool changed = false; /* Record unreachable blocks */ for (BasicBlock* const block : Blocks()) { /* Internal throw blocks are also reachable */ if (fgIsThrowHlpBlk(block)) { continue; } else if (block == genReturnBB) { // Don't remove statements for the genReturnBB block, as we might have special hookups there. // For example, the profiler hookup needs to have the "void GT_RETURN" statement // to properly set the info.compProfilerCallback flag. continue; } else { // If any of the entry blocks can reach this block, then we skip it. if (!BlockSetOps::IsEmptyIntersection(this, fgEnterBlks, block->bbReach)) { continue; } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (!BlockSetOps::IsEmptyIntersection(this, fgAlwaysBlks, block->bbReach)) { continue; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } // Remove all the code for the block fgUnreachableBlock(block); // Make sure that the block was marked as removed */ noway_assert(block->bbFlags & BBF_REMOVED); // Some blocks mark the end of trys and catches // and can't be removed. We convert these into // empty blocks of type BBJ_THROW if (block->bbFlags & BBF_DONT_REMOVE) { const bool bIsBBCallAlwaysPair = block->isBBCallAlwaysPair(); // Unmark the block as removed, clear BBF_INTERNAL, and set BBJ_IMPORTED // The successors may be unreachable after this change. 
changed |= block->NumSucc() > 0; block->bbFlags &= ~(BBF_REMOVED | BBF_INTERNAL); block->bbFlags |= BBF_IMPORTED; block->bbJumpKind = BBJ_THROW; block->bbSetRunRarely(); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // If this is a <BBJ_CALLFINALLY, BBJ_ALWAYS> pair, we have to clear BBF_FINALLY_TARGET flag on // the target node (of BBJ_ALWAYS) since BBJ_CALLFINALLY node is getting converted to a BBJ_THROW. if (bIsBBCallAlwaysPair) { noway_assert(block->bbNext->bbJumpKind == BBJ_ALWAYS); fgClearFinallyTargetBit(block->bbNext->bbJumpDest); } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else { /* We have to call fgRemoveBlock next */ hasUnreachableBlocks = true; changed = true; } } if (hasUnreachableBlocks) { // Now remove the unreachable blocks for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) { // If we marked a block with BBF_REMOVED then we need to call fgRemoveBlock() on it if (block->bbFlags & BBF_REMOVED) { fgRemoveBlock(block, /* unreachable */ true); // TODO: couldn't we have fgRemoveBlock() return the block after the (last)one removed // so we don't need the code below? // When we have a BBJ_CALLFINALLY, BBJ_ALWAYS pair; fgRemoveBlock will remove // both blocks, so we must advance 1 extra place in the block list // if (block->isBBCallAlwaysPair()) { block = block->bbNext; } } } } return changed; } //------------------------------------------------------------------------ // fgComputeReachability: Compute the dominator and reachable sets. // // Use `fgReachable()` to check reachability, `fgDominate()` to check dominance. // // Also, compute the list of return blocks `fgReturnBlocks` and set of enter blocks `fgEnterBlks`. // Delete unreachable blocks. // // Assumptions: // Assumes the predecessor lists are computed and correct. // void Compiler::fgComputeReachability() { #ifdef DEBUG if (verbose) { printf("*************** In fgComputeReachability\n"); } fgVerifyHandlerTab(); // Make sure that the predecessor lists are accurate assert(fgComputePredsDone); fgDebugCheckBBlist(); #endif // DEBUG fgComputeReturnBlocks(); // Compute reachability and then delete blocks determined to be unreachable. If we delete blocks, we // need to loop, as that might have caused more blocks to become unreachable. This can happen in the // case where a call to a finally is unreachable and deleted (maybe the call to the finally is // preceded by a throw or an infinite loop), making the blocks following the finally unreachable. // However, all EH entry blocks are considered global entry blocks, causing the blocks following the // call to the finally to stay rooted, until a second round of reachability is done. // The dominator algorithm expects that all blocks can be reached from the fgEnterBlks set. unsigned passNum = 1; bool changed; do { // Just to be paranoid, avoid infinite loops; fall back to minopts. if (passNum > 10) { noway_assert(!"Too many unreachable block removal loops"); } // Walk the flow graph, reassign block numbers to keep them in ascending order. JITDUMP("\nRenumbering the basic blocks for fgComputeReachability pass #%u\n", passNum); passNum++; fgRenumberBlocks(); // // Compute fgEnterBlks // fgComputeEnterBlocksSet(); // // Compute bbReach // fgComputeReachabilitySets(); // // Use reachability information to delete unreachable blocks. 
// changed = fgRemoveUnreachableBlocks(); } while (changed); #ifdef DEBUG if (verbose) { printf("\nAfter computing reachability:\n"); fgDispBasicBlocks(verboseTrees); printf("\n"); } fgVerifyHandlerTab(); fgDebugCheckBBlist(true); #endif // DEBUG // // Now, compute the dominators // fgComputeDoms(); } //------------------------------------------------------------- // fgDfsInvPostOrder: Helper function for computing dominance information. // // In order to be able to compute dominance, we need to first get a DFS reverse post order sort on the basic flow // graph for the dominance algorithm to operate correctly. The reason why we need the DFS sort is because we will // build the dominance sets using the partial order induced by the DFS sorting. With this precondition not // holding true, the algorithm doesn't work properly. // void Compiler::fgDfsInvPostOrder() { // NOTE: This algorithm only pays attention to the actual blocks. It ignores the imaginary entry block. // visited : Once we run the DFS post order sort recursive algorithm, we mark the nodes we visited to avoid // backtracking. BlockSet visited(BlockSetOps::MakeEmpty(this)); // We begin by figuring out which basic blocks don't have incoming edges and mark them as // start nodes. Later on we run the recursive algorithm for each node that we // mark in this step. BlockSet_ValRet_T startNodes = fgDomFindStartNodes(); // Make sure fgEnterBlks are still there in startNodes, even if they participate in a loop (i.e., there is // an incoming edge into the block). assert(fgEnterBlksSetValid); BlockSetOps::UnionD(this, startNodes, fgEnterBlks); assert(BlockSetOps::IsMember(this, startNodes, fgFirstBB->bbNum)); // Call the flowgraph DFS traversal helper. unsigned postIndex = 1; for (BasicBlock* const block : Blocks()) { // If the block has no predecessors, and we haven't already visited it (because it's in fgEnterBlks but also // reachable from the first block), go ahead and traverse starting from this block. if (BlockSetOps::IsMember(this, startNodes, block->bbNum) && !BlockSetOps::IsMember(this, visited, block->bbNum)) { fgDfsInvPostOrderHelper(block, visited, &postIndex); } } // After the DFS reverse postorder is completed, we must have visited all the basic blocks. noway_assert(postIndex == fgBBcount + 1); noway_assert(fgBBNumMax == fgBBcount); #ifdef DEBUG if (0 && verbose) { printf("\nAfter doing a post order traversal of the BB graph, this is the ordering:\n"); for (unsigned i = 1; i <= fgBBNumMax; ++i) { printf("%02u -> " FMT_BB "\n", i, fgBBInvPostOrder[i]->bbNum); } printf("\n"); } #endif // DEBUG } //------------------------------------------------------------- // fgDomFindStartNodes: Helper for dominance computation to find the start nodes block set. // // The start nodes is a set that represents which basic blocks in the flow graph don't have incoming edges. // We begin assuming everything is a start block and remove any block that is a successor of another. // // Returns: // Block set of start nodes. 
// BlockSet_ValRet_T Compiler::fgDomFindStartNodes() { BlockSet startNodes(BlockSetOps::MakeFull(this)); for (BasicBlock* const block : Blocks()) { for (BasicBlock* const succ : block->Succs(this)) { BlockSetOps::RemoveElemD(this, startNodes, succ->bbNum); } } #ifdef DEBUG if (verbose) { printf("\nDominator computation start blocks (those blocks with no incoming edges):\n"); BlockSetOps::Iter iter(this, startNodes); unsigned bbNum = 0; while (iter.NextElem(&bbNum)) { printf(FMT_BB " ", bbNum); } printf("\n"); } #endif // DEBUG return startNodes; } //------------------------------------------------------------------------ // fgDfsInvPostOrderHelper: Helper to assign post-order numbers to blocks. // // Arguments: // block - The starting entry block // visited - The set of visited blocks // count - Pointer to the Dfs counter // // Notes: // Compute a non-recursive DFS traversal of the flow graph using an // evaluation stack to assign post-order numbers. // void Compiler::fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count) { // Assume we haven't visited this node yet (callers ensure this). assert(!BlockSetOps::IsMember(this, visited, block->bbNum)); // Allocate a local stack to hold the DFS traversal actions necessary // to compute pre/post-ordering of the control flowgraph. ArrayStack<DfsBlockEntry> stack(getAllocator(CMK_ArrayStack)); // Push the first block on the stack to seed the traversal. stack.Push(DfsBlockEntry(DSS_Pre, block)); // Flag the node we just visited to avoid backtracking. BlockSetOps::AddElemD(this, visited, block->bbNum); // The search is terminated once all the actions have been processed. while (!stack.Empty()) { DfsBlockEntry current = stack.Pop(); BasicBlock* currentBlock = current.dfsBlock; if (current.dfsStackState == DSS_Pre) { // This is a pre-visit that corresponds to the first time the // node is encountered in the spanning tree and receives pre-order // numberings. By pushing the post-action on the stack here we // are guaranteed to only process it after all of its successors // pre and post actions are processed. stack.Push(DfsBlockEntry(DSS_Post, currentBlock)); for (BasicBlock* const succ : currentBlock->Succs(this)) { // If this is a node we haven't seen before, go ahead and process if (!BlockSetOps::IsMember(this, visited, succ->bbNum)) { // Push a pre-visit action for this successor onto the stack and // mark it as visited in case this block has multiple successors // to the same node (multi-graph). stack.Push(DfsBlockEntry(DSS_Pre, succ)); BlockSetOps::AddElemD(this, visited, succ->bbNum); } } } else { // This is a post-visit that corresponds to the last time the // node is visited in the spanning tree and only happens after // all descendents in the spanning tree have had pre and post // actions applied. assert(current.dfsStackState == DSS_Post); unsigned invCount = fgBBcount - *count + 1; assert(1 <= invCount && invCount <= fgBBNumMax); fgBBInvPostOrder[invCount] = currentBlock; currentBlock->bbPostOrderNum = invCount; ++(*count); } } } //------------------------------------------------------------------------ // fgComputeDoms: Computer dominators. Use `fgDominate()` to check dominance. // // Compute immediate dominators, the dominator tree and and its pre/post-order traversal numbers. // // Also sets BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY flag on blocks dominated by exceptional entry blocks. // // Notes: // Immediate dominator computation is based on "A Simple, Fast Dominance Algorithm" // by Keith D. Cooper, Timothy J. 
Harvey, and Ken Kennedy. // void Compiler::fgComputeDoms() { assert(!fgCheapPredsValid); #ifdef DEBUG if (verbose) { printf("*************** In fgComputeDoms\n"); } fgVerifyHandlerTab(); // Make sure that the predecessor lists are accurate. // Also check that the blocks are properly, densely numbered (so calling fgRenumberBlocks is not necessary). fgDebugCheckBBlist(true); // Assert things related to the BlockSet epoch. assert(fgBBcount == fgBBNumMax); assert(BasicBlockBitSetTraits::GetSize(this) == fgBBNumMax + 1); #endif // DEBUG BlockSet processedBlks(BlockSetOps::MakeEmpty(this)); fgBBInvPostOrder = new (this, CMK_DominatorMemory) BasicBlock*[fgBBNumMax + 1]{}; fgDfsInvPostOrder(); noway_assert(fgBBInvPostOrder[0] == nullptr); // flRoot and bbRoot represent an imaginary unique entry point in the flow graph. // All the orphaned EH blocks and fgFirstBB will temporarily have its predecessors list // (with bbRoot as the only basic block in it) set as flRoot. // Later on, we clear their predecessors and let them to be nullptr again. // Since we number basic blocks starting at one, the imaginary entry block is conveniently numbered as zero. BasicBlock bbRoot; bbRoot.bbPreds = nullptr; bbRoot.bbNum = 0; bbRoot.bbIDom = &bbRoot; bbRoot.bbPostOrderNum = 0; bbRoot.bbFlags = BBF_EMPTY; flowList flRoot(&bbRoot, nullptr); fgBBInvPostOrder[0] = &bbRoot; // Mark both bbRoot and fgFirstBB processed BlockSetOps::AddElemD(this, processedBlks, 0); // bbRoot == block #0 BlockSetOps::AddElemD(this, processedBlks, 1); // fgFirstBB == block #1 assert(fgFirstBB->bbNum == 1); // Special case fgFirstBB to say its IDom is bbRoot. fgFirstBB->bbIDom = &bbRoot; BasicBlock* block = nullptr; for (block = fgFirstBB->bbNext; block != nullptr; block = block->bbNext) { // If any basic block has no predecessors then we flag it as processed and temporarily // mark its precedessor list to be flRoot. This makes the flowgraph connected, // a precondition that is needed by the dominance algorithm to operate properly. if (block->bbPreds == nullptr) { block->bbPreds = &flRoot; block->bbIDom = &bbRoot; BlockSetOps::AddElemD(this, processedBlks, block->bbNum); } else { block->bbIDom = nullptr; } } // Mark the EH blocks as entry blocks and also flag them as processed. if (compHndBBtabCount > 0) { for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->HasFilter()) { HBtab->ebdFilter->bbIDom = &bbRoot; BlockSetOps::AddElemD(this, processedBlks, HBtab->ebdFilter->bbNum); } HBtab->ebdHndBeg->bbIDom = &bbRoot; BlockSetOps::AddElemD(this, processedBlks, HBtab->ebdHndBeg->bbNum); } } // Now proceed to compute the immediate dominators for each basic block. bool changed = true; while (changed) { changed = false; // Process each actual block; don't process the imaginary predecessor block. for (unsigned i = 1; i <= fgBBNumMax; ++i) { flowList* first = nullptr; BasicBlock* newidom = nullptr; block = fgBBInvPostOrder[i]; // If we have a block that has bbRoot as its bbIDom // it means we flag it as processed and as an entry block so // in this case we're all set. if (block->bbIDom == &bbRoot) { continue; } // Pick up the first processed predecesor of the current block. for (first = block->bbPreds; first != nullptr; first = first->flNext) { if (BlockSetOps::IsMember(this, processedBlks, first->getBlock()->bbNum)) { break; } } noway_assert(first != nullptr); // We assume the first processed predecessor will be the // immediate dominator and then compute the forward flow analysis. 
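// In effect, for each remaining predecessor p that already has an approximate bbIDom, the loop
// below refines the guess via
//
//     newidom = fgIntersectDom(p->getBlock(), newidom);
//
// i.e. it walks both blocks up the partially computed bbIDom chains, guided by the
// bbPostOrderNum values assigned by fgDfsInvPostOrder, until they meet at a common ancestor.
// Iterating this over all blocks until no bbIDom changes is the fixed-point computation from
// the Cooper/Harvey/Kennedy algorithm cited above.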
newidom = first->getBlock(); for (flowList* p = block->bbPreds; p != nullptr; p = p->flNext) { if (p->getBlock() == first->getBlock()) { continue; } if (p->getBlock()->bbIDom != nullptr) { // fgIntersectDom is basically the set intersection between // the dominance sets of the new IDom and the current predecessor // Since the nodes are ordered in DFS inverse post order and // IDom induces a tree, fgIntersectDom actually computes // the lowest common ancestor in the dominator tree. newidom = fgIntersectDom(p->getBlock(), newidom); } } // If the Immediate dominator changed, assign the new one // to the current working basic block. if (block->bbIDom != newidom) { noway_assert(newidom != nullptr); block->bbIDom = newidom; changed = true; } BlockSetOps::AddElemD(this, processedBlks, block->bbNum); } } // As stated before, once we have computed immediate dominance we need to clear // all the basic blocks whose predecessor list was set to flRoot. This // reverts that and leaves the blocks the same as before. for (BasicBlock* const block : Blocks()) { if (block->bbPreds == &flRoot) { block->bbPreds = nullptr; } } fgCompDominatedByExceptionalEntryBlocks(); #ifdef DEBUG if (verbose) { fgDispDoms(); } #endif fgNumberDomTree(fgBuildDomTree()); fgModified = false; fgDomBBcount = fgBBcount; assert(fgBBcount == fgBBNumMax); assert(BasicBlockBitSetTraits::GetSize(this) == fgDomBBcount + 1); fgDomsComputed = true; } //------------------------------------------------------------------------ // fgBuildDomTree: Build the dominator tree for the current flowgraph. // // Returns: // An array of dominator tree nodes, indexed by BasicBlock::bbNum. // // Notes: // Immediate dominators must have already been computed in BasicBlock::bbIDom // before calling this. // DomTreeNode* Compiler::fgBuildDomTree() { JITDUMP("\nInside fgBuildDomTree\n"); unsigned bbArraySize = fgBBNumMax + 1; DomTreeNode* domTree = new (this, CMK_DominatorMemory) DomTreeNode[bbArraySize]{}; BasicBlock* imaginaryRoot = fgFirstBB->bbIDom; if (imaginaryRoot != nullptr) { // If the first block has a dominator then this must be the imaginary entry block added // by fgComputeDoms, it is not actually part of the flowgraph and should have number 0. assert(imaginaryRoot->bbNum == 0); assert(imaginaryRoot->bbIDom == imaginaryRoot); // Clear the imaginary dominator to turn the tree back to a forest. fgFirstBB->bbIDom = nullptr; } // If the imaginary root is present then we'll need to create a forest instead of a tree. // Forest roots are chained via DomTreeNode::nextSibling and we keep track of this list's // tail in order to append to it. The head of the list is fgFirstBB, by construction. BasicBlock* rootListTail = fgFirstBB; // Traverse the entire block list to build the dominator tree. Skip fgFirstBB // as it is always a root of the dominator forest. for (BasicBlock* const block : Blocks(fgFirstBB->bbNext)) { BasicBlock* parent = block->bbIDom; if (parent != imaginaryRoot) { assert(block->bbNum < bbArraySize); assert(parent->bbNum < bbArraySize); domTree[block->bbNum].nextSibling = domTree[parent->bbNum].firstChild; domTree[parent->bbNum].firstChild = block; } else if (imaginaryRoot != nullptr) { assert(rootListTail->bbNum < bbArraySize); domTree[rootListTail->bbNum].nextSibling = block; rootListTail = block; // Clear the imaginary dominator to turn the tree back to a forest. 
block->bbIDom = nullptr; } } JITDUMP("\nAfter computing the Dominance Tree:\n"); DBEXEC(verbose, fgDispDomTree(domTree)); return domTree; } #ifdef DEBUG void Compiler::fgDispDomTree(DomTreeNode* domTree) { for (unsigned i = 1; i <= fgBBNumMax; ++i) { if (domTree[i].firstChild != nullptr) { printf(FMT_BB " : ", i); for (BasicBlock* child = domTree[i].firstChild; child != nullptr; child = domTree[child->bbNum].nextSibling) { printf(FMT_BB " ", child->bbNum); } printf("\n"); } } printf("\n"); } #endif // DEBUG //------------------------------------------------------------------------ // fgNumberDomTree: Assign pre/post-order numbers to the dominator tree. // // Arguments: // domTree - The dominator tree node array // // Notes: // Runs a non-recursive DFS traversal of the dominator tree to assign // pre-order and post-order numbers. These numbers are used to provide // constant time lookup ancestor/descendent tests between pairs of nodes // in the tree. // void Compiler::fgNumberDomTree(DomTreeNode* domTree) { class NumberDomTreeVisitor : public DomTreeVisitor<NumberDomTreeVisitor> { unsigned m_preNum; unsigned m_postNum; public: NumberDomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : DomTreeVisitor(compiler, domTree) { } void Begin() { unsigned bbArraySize = m_compiler->fgBBNumMax + 1; m_compiler->fgDomTreePreOrder = new (m_compiler, CMK_DominatorMemory) unsigned[bbArraySize]{}; m_compiler->fgDomTreePostOrder = new (m_compiler, CMK_DominatorMemory) unsigned[bbArraySize]{}; // The preorder and postorder numbers. // We start from 1 to match the bbNum ordering. m_preNum = 1; m_postNum = 1; } void PreOrderVisit(BasicBlock* block) { m_compiler->fgDomTreePreOrder[block->bbNum] = m_preNum++; } void PostOrderVisit(BasicBlock* block) { m_compiler->fgDomTreePostOrder[block->bbNum] = m_postNum++; } void End() { noway_assert(m_preNum == m_compiler->fgBBNumMax + 1); noway_assert(m_postNum == m_compiler->fgBBNumMax + 1); noway_assert(m_compiler->fgDomTreePreOrder[0] == 0); // Unused first element noway_assert(m_compiler->fgDomTreePostOrder[0] == 0); // Unused first element noway_assert(m_compiler->fgDomTreePreOrder[1] == 1); // First block should be first in pre order #ifdef DEBUG if (m_compiler->verbose) { printf("\nAfter numbering the dominator tree:\n"); for (unsigned i = 1; i <= m_compiler->fgBBNumMax; ++i) { printf(FMT_BB ": pre=%02u, post=%02u\n", i, m_compiler->fgDomTreePreOrder[i], m_compiler->fgDomTreePostOrder[i]); } } #endif // DEBUG } }; NumberDomTreeVisitor visitor(this, domTree); visitor.WalkTree(); } //------------------------------------------------------------- // fgIntersectDom: Intersect two immediate dominator sets. // // Find the lowest common ancestor in the dominator tree between two basic blocks. The LCA in the dominance tree // represents the closest dominator between the two basic blocks. Used to adjust the IDom value in fgComputDoms. // // Arguments: // a, b - two blocks to intersect // // Returns: // The least common ancestor of `a` and `b` in the IDom tree. // BasicBlock* Compiler::fgIntersectDom(BasicBlock* a, BasicBlock* b) { BasicBlock* finger1 = a; BasicBlock* finger2 = b; while (finger1 != finger2) { while (finger1->bbPostOrderNum > finger2->bbPostOrderNum) { finger1 = finger1->bbIDom; } while (finger2->bbPostOrderNum > finger1->bbPostOrderNum) { finger2 = finger2->bbIDom; } } return finger1; } //------------------------------------------------------------- // fgGetDominatorSet: Return a set of blocks that dominate `block`. 
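//
// The returned set always contains `block` itself (every block dominates itself); it is built
// by simply walking up the bbIDom links starting at `block`.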
// // Note: this is slow compared to calling fgDominate(), especially if doing a single check comparing // two blocks. // // Arguments: // block - get the set of blocks which dominate this block // // Returns: // A set of blocks which dominate `block`. // BlockSet_ValRet_T Compiler::fgGetDominatorSet(BasicBlock* block) { assert(block != nullptr); BlockSet domSet(BlockSetOps::MakeEmpty(this)); do { BlockSetOps::AddElemD(this, domSet, block->bbNum); if (block == block->bbIDom) { break; // We found a cycle in the IDom list, so we're done. } block = block->bbIDom; } while (block != nullptr); return domSet; } //------------------------------------------------------------- // fgInitBlockVarSets: Initialize the per-block variable sets (used for liveness analysis). // // Notes: // Initializes: // bbVarUse, bbVarDef, bbLiveIn, bbLiveOut, // bbMemoryUse, bbMemoryDef, bbMemoryLiveIn, bbMemoryLiveOut, // bbScope // void Compiler::fgInitBlockVarSets() { for (BasicBlock* const block : Blocks()) { block->InitVarSets(this); } fgBBVarSetsInited = true; } //------------------------------------------------------------------------ // fgPostImportationCleanups: clean up flow graph after importation // // Notes: // // Find and remove any basic blocks that are useless (e.g. they have not been // imported because they are not reachable, or they have been optimized away). // // Remove try regions where no blocks in the try were imported. // Update the end of try and handler regions where trailing blocks were not imported. // Update the start of try regions that were partially imported (OSR) // // For OSR, add "step blocks" and conditional logic to ensure the path from // method entry to the OSR logical entry point always flows through the first // block of any enclosing try. // // In particular, given a method like // // S0; // try { // S1; // try { // S2; // for (...) {} // OSR logical entry here // } // } // // Where the Sn are arbitrary hammocks of code, the OSR logical entry point // would be in the middle of a nested try. We can't branch there directly // from the OSR method entry. So we transform the flow to: // // _firstCall = 0; // goto pt1; // S0; // pt1: // try { // if (_firstCall == 0) goto pt2; // S1; // pt2: // try { // if (_firstCall == 0) goto pp; // S2; // pp: // _firstCall = 1; // for (...) // } // } // // where the "state variable" _firstCall guides execution appropriately // from OSR method entry, and flow always enters the try blocks at the // first block of the try. // void Compiler::fgPostImportationCleanup() { JITDUMP("\n*************** In fgPostImportationCleanup\n"); BasicBlock* cur; BasicBlock* nxt; // If we remove any blocks, we'll have to do additional work unsigned removedBlks = 0; for (cur = fgFirstBB; cur != nullptr; cur = nxt) { // Get hold of the next block (in case we delete 'cur') nxt = cur->bbNext; // Should this block be removed? if (!(cur->bbFlags & BBF_IMPORTED)) { noway_assert(cur->isEmpty()); if (ehCanDeleteEmptyBlock(cur)) { JITDUMP(FMT_BB " was not imported, marking as removed (%d)\n", cur->bbNum, removedBlks); cur->bbFlags |= BBF_REMOVED; removedBlks++; // Drop the block from the list. // // We rely on the fact that this does not clear out // cur->bbNext or cur->bbPrev in the code that // follows. fgUnlinkBlock(cur); } else { // We were prevented from deleting this block by EH // normalization. Mark the block as imported. cur->bbFlags |= BBF_IMPORTED; } } } // If no blocks were removed, we're done. // Unless we are an OSR method with a try entry. 
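// (Even if nothing was removed, an OSR method whose entry lies inside a try region still needs
// the step-block flow added at the end of this method, so that case must not return early.)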
// if ((removedBlks == 0) && !(opts.IsOSR() && fgOSREntryBB->hasTryIndex())) { return; } // Update all references in the exception handler table. // // We may have made the entire try block unreachable. // Check for this case and remove the entry from the EH table. // // For OSR, just the initial part of a try range may become // unreachable; if so we need to shrink the try range down // to the portion that was imported. unsigned XTnum; EHblkDsc* HBtab; unsigned delCnt = 0; // Walk the EH regions from inner to outer for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { AGAIN: // If start of a try region was not imported, then we either // need to trim the region extent, or remove the region // entirely. // // In normal importation, it is not valid to jump into the // middle of a try, so if the try entry was not imported, the // entire try can be removed. // // In OSR importation the entry patchpoint may be in the // middle of a try, and we need to determine how much of the // try ended up getting imported. Because of backwards // branches we may end up importing the entire try even though // execution starts in the middle. // // Note it is common in both cases for the ends of trys (and // associated handlers) to end up not getting imported, so if // the try region is not removed, we always check if we need // to trim the ends. // if (HBtab->ebdTryBeg->bbFlags & BBF_REMOVED) { // Usual case is that the entire try can be removed. bool removeTryRegion = true; if (opts.IsOSR()) { // For OSR we may need to trim the try region start. // // We rely on the fact that removed blocks have been snipped from // the main block list, but that those removed blocks have kept // their bbprev (and bbnext) links. // // Find the first unremoved block before the try entry block. // BasicBlock* const oldTryEntry = HBtab->ebdTryBeg; BasicBlock* tryEntryPrev = oldTryEntry->bbPrev; while ((tryEntryPrev != nullptr) && ((tryEntryPrev->bbFlags & BBF_REMOVED) != 0)) { tryEntryPrev = tryEntryPrev->bbPrev; } // Because we've added an unremovable scratch block as // fgFirstBB, this backwards walk should always find // some block. assert(tryEntryPrev != nullptr); // If there is a next block of this prev block, and that block is // contained in the current try, we'd like to make that block // the new start of the try, and keep the region. BasicBlock* newTryEntry = tryEntryPrev->bbNext; bool updateTryEntry = false; if ((newTryEntry != nullptr) && bbInTryRegions(XTnum, newTryEntry)) { // We want to trim the begin extent of the current try region to newTryEntry. // // This method is invoked after EH normalization, so we may need to ensure all // try regions begin at blocks that are not the start or end of some other try. // // So, see if this block is already the start or end of some other EH region. if (bbIsTryBeg(newTryEntry)) { // We've already end-trimmed the inner try. Do the same now for the // current try, so it is easier to detect when they mutually protect. // (we will call this again later, which is harmless). fgSkipRmvdBlocks(HBtab); // If this try and the inner try form a "mutually protected try region" // then we must continue to share the try entry block. 
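// (Two EH table entries are "mutually protected" when their try regions cover exactly the same
// blocks -- typically a single try with more than one handler -- so both entries must keep
// referring to the same try entry block.)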
EHblkDsc* const HBinner = ehGetBlockTryDsc(newTryEntry); assert(HBinner->ebdTryBeg == newTryEntry); if (HBtab->ebdTryLast != HBinner->ebdTryLast) { updateTryEntry = true; } } // Also, a try and handler cannot start at the same block else if (bbIsHandlerBeg(newTryEntry)) { updateTryEntry = true; } if (updateTryEntry) { // We need to trim the current try to begin at a different block. Normally // this would be problematic as we don't have enough context to redirect // all the incoming edges, but we know oldTryEntry is unreachable. // So there are no incoming edges to worry about. // assert(!tryEntryPrev->bbFallsThrough()); // What follows is similar to fgNewBBInRegion, but we can't call that // here as the oldTryEntry is no longer in the main bb list. newTryEntry = bbNewBasicBlock(BBJ_NONE); newTryEntry->bbFlags |= (BBF_IMPORTED | BBF_INTERNAL); // Set the right EH region indices on this new block. // // Patchpoints currently cannot be inside handler regions, // and so likewise the old and new try region entries. assert(!oldTryEntry->hasHndIndex()); newTryEntry->setTryIndex(XTnum); newTryEntry->clearHndIndex(); fgInsertBBafter(tryEntryPrev, newTryEntry); // Generally this (unreachable) empty new try entry block can fall through // to the next block, but in cases where there's a nested try with an // out of order handler, the next block may be a handler. So even though // this new try entry block is unreachable, we need to give it a // plausible flow target. Simplest is to just mark it as a throw. if (bbIsHandlerBeg(newTryEntry->bbNext)) { newTryEntry->bbJumpKind = BBJ_THROW; } JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to new " FMT_BB "\n", XTnum + delCnt, oldTryEntry->bbNum, newTryEntry->bbNum); } else { // We can just trim the try to newTryEntry as it is not part of some inner try or handler. JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to " FMT_BB "\n", XTnum + delCnt, oldTryEntry->bbNum, newTryEntry->bbNum); } // Update the handler table fgSetTryBeg(HBtab, newTryEntry); // Try entry blocks get specially marked and have special protection. HBtab->ebdTryBeg->bbFlags |= BBF_DONT_REMOVE | BBF_TRY_BEG; // We are keeping this try region removeTryRegion = false; } } if (removeTryRegion) { // In the dump, refer to the region by its original index. JITDUMP("Try region #%u (" FMT_BB " -- " FMT_BB ") not imported, removing try from the EH table\n", XTnum + delCnt, HBtab->ebdTryBeg->bbNum, HBtab->ebdTryLast->bbNum); delCnt++; fgRemoveEHTableEntry(XTnum); if (XTnum < compHndBBtabCount) { // There are more entries left to process, so do more. Note that // HBtab now points to the next entry, that we copied down to the // current slot. XTnum also stays the same. goto AGAIN; } // no more entries (we deleted the last one), so exit the loop break; } } // If we get here, the try entry block was not removed. // Check some invariants. assert(HBtab->ebdTryBeg->bbFlags & BBF_IMPORTED); assert(HBtab->ebdTryBeg->bbFlags & BBF_DONT_REMOVE); assert(HBtab->ebdHndBeg->bbFlags & BBF_IMPORTED); assert(HBtab->ebdHndBeg->bbFlags & BBF_DONT_REMOVE); if (HBtab->HasFilter()) { assert(HBtab->ebdFilter->bbFlags & BBF_IMPORTED); assert(HBtab->ebdFilter->bbFlags & BBF_DONT_REMOVE); } // Finally, do region end trimming -- update try and handler ends to reflect removed blocks. fgSkipRmvdBlocks(HBtab); } // If this is OSR, and the OSR entry was mid-try or in a nested try entry, // add the appropriate step block logic. 
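// (See the transformation sketched in the header comment of this method: a state variable is
// zeroed at method entry, set to one at the OSR entry, and each distinct enclosing try entry
// gets a conditional branch that steps toward the OSR entry while the variable is still zero.)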
// if (opts.IsOSR()) { BasicBlock* const osrEntry = fgOSREntryBB; BasicBlock* entryJumpTarget = osrEntry; if (osrEntry->hasTryIndex()) { EHblkDsc* enclosingTry = ehGetBlockTryDsc(osrEntry); BasicBlock* tryEntry = enclosingTry->ebdTryBeg; bool const inNestedTry = (enclosingTry->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX); bool const osrEntryMidTry = (osrEntry != tryEntry); if (inNestedTry || osrEntryMidTry) { JITDUMP("OSR Entry point at IL offset 0x%0x (" FMT_BB ") is %s%s try region EH#%u\n", info.compILEntry, osrEntry->bbNum, osrEntryMidTry ? "within " : "at the start of ", inNestedTry ? "nested" : "", osrEntry->getTryIndex()); // We'll need a state variable to control the branching. // // It will be initialized to zero when the OSR method is entered and set to one // once flow reaches the osrEntry. // unsigned const entryStateVar = lvaGrabTemp(false DEBUGARG("OSR entry state var")); lvaTable[entryStateVar].lvType = TYP_INT; // Zero the entry state at method entry. // GenTree* const initEntryState = gtNewTempAssign(entryStateVar, gtNewZeroConNode(TYP_INT)); fgNewStmtAtBeg(fgFirstBB, initEntryState); // Set the state variable once control flow reaches the OSR entry. // GenTree* const setEntryState = gtNewTempAssign(entryStateVar, gtNewOneConNode(TYP_INT)); fgNewStmtAtBeg(osrEntry, setEntryState); // Helper method to add flow // auto addConditionalFlow = [this, entryStateVar, &entryJumpTarget](BasicBlock* fromBlock, BasicBlock* toBlock) { // We may have previously though this try entry was unreachable, but now we're going to // step through it on the way to the OSR entry. So ensure it has plausible profile weight. // if (fgHaveProfileData() && !fromBlock->hasProfileWeight()) { JITDUMP("Updating block weight for now-reachable try entry " FMT_BB " via " FMT_BB "\n", fromBlock->bbNum, fgFirstBB->bbNum); fromBlock->inheritWeight(fgFirstBB); } BasicBlock* const newBlock = fgSplitBlockAtBeginning(fromBlock); fromBlock->bbFlags |= BBF_INTERNAL; newBlock->bbFlags &= ~BBF_DONT_REMOVE; GenTree* const entryStateLcl = gtNewLclvNode(entryStateVar, TYP_INT); GenTree* const compareEntryStateToZero = gtNewOperNode(GT_EQ, TYP_INT, entryStateLcl, gtNewZeroConNode(TYP_INT)); GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero); fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero); fromBlock->bbJumpKind = BBJ_COND; fromBlock->bbJumpDest = toBlock; fgAddRefPred(toBlock, fromBlock); newBlock->inheritWeight(fromBlock); entryJumpTarget = fromBlock; }; // If this is a mid-try entry, add a conditional branch from the start of the try to osr entry point. // if (osrEntryMidTry) { addConditionalFlow(tryEntry, osrEntry); } // Add conditional branches for each successive enclosing try with a distinct // entry block. // while (enclosingTry->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) { EHblkDsc* const nextTry = ehGetDsc(enclosingTry->ebdEnclosingTryIndex); BasicBlock* const nextTryEntry = nextTry->ebdTryBeg; // We don't need to add flow for mutual-protect regions // (multiple tries that all share the same entry block). // if (nextTryEntry != tryEntry) { addConditionalFlow(nextTryEntry, tryEntry); } enclosingTry = nextTry; tryEntry = nextTryEntry; } // Transform the method entry flow, if necessary. // // Note even if the OSR is in a nested try, if it's a mutual protect try // it can be reached directly from "outside". 
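// At this point entryJumpTarget refers to the conditional test block added at the outermost
// enclosing try entry (or is still osrEntry itself when no step flow had to be added), so the
// method entry below is redirected to it if necessary.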
// assert(fgFirstBB->bbJumpDest == osrEntry); assert(fgFirstBB->bbJumpKind == BBJ_ALWAYS); if (entryJumpTarget != osrEntry) { fgFirstBB->bbJumpDest = entryJumpTarget; fgRemoveRefPred(osrEntry, fgFirstBB); fgAddRefPred(entryJumpTarget, fgFirstBB); JITDUMP("OSR: redirecting flow from method entry " FMT_BB " to OSR entry " FMT_BB " via step blocks.\n", fgFirstBB->bbNum, fgOSREntryBB->bbNum); } else { JITDUMP("OSR: leaving direct flow from method entry " FMT_BB " to OSR entry " FMT_BB ", no step blocks needed.\n", fgFirstBB->bbNum, fgOSREntryBB->bbNum); } } else { // If OSR entry is the start of an un-nested try, no work needed. // // We won't hit this case today as we don't allow the try entry to be the target of a backedge, // and currently patchpoints only appear at targets of backedges. // JITDUMP("OSR Entry point at IL offset 0x%0x (" FMT_BB ") is start of an un-nested try region, no step blocks needed.\n", info.compILEntry, osrEntry->bbNum); assert(entryJumpTarget == osrEntry); assert(fgOSREntryBB == osrEntry); } } else { // If OSR entry is not within a try, no work needed. // JITDUMP("OSR Entry point at IL offset 0x%0x (" FMT_BB ") is not in a try region, no step blocks needed.\n", info.compILEntry, osrEntry->bbNum); assert(entryJumpTarget == osrEntry); assert(fgOSREntryBB == osrEntry); } } // Renumber the basic blocks JITDUMP("\nRenumbering the basic blocks for fgPostImporterCleanup\n"); fgRenumberBlocks(); #ifdef DEBUG fgVerifyHandlerTab(); #endif // DEBUG } //------------------------------------------------------------- // fgCanCompactBlocks: Determine if a block and its bbNext successor can be compacted. // // Arguments: // block - block to check. If nullptr, return false. // bNext - bbNext of `block`. If nullptr, return false. // // Returns: // true if compaction is allowed // bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) { if ((block == nullptr) || (bNext == nullptr)) { return false; } noway_assert(block->bbNext == bNext); if (block->bbJumpKind != BBJ_NONE) { return false; } // If the next block has multiple incoming edges, we can still compact if the first block is empty. // However, not if it is the beginning of a handler. if (bNext->countOfInEdges() != 1 && (!block->isEmpty() || (block->bbFlags & BBF_FUNCLET_BEG) || (block->bbCatchTyp != BBCT_NONE))) { return false; } if (bNext->bbFlags & BBF_DONT_REMOVE) { return false; } // Don't compact the first block if it was specially created as a scratch block. if (fgBBisScratch(block)) { return false; } // Don't compact away any loop entry blocks that we added in optCanonicalizeLoops if (optIsLoopEntry(block)) { return false; } #if defined(TARGET_ARM) // We can't compact a finally target block, as we need to generate special code for such blocks during code // generation if ((bNext->bbFlags & BBF_FINALLY_TARGET) != 0) return false; #endif // We don't want to compact blocks that are in different Hot/Cold regions // if (fgInDifferentRegions(block, bNext)) { return false; } // We cannot compact two blocks in different EH regions. // if (fgCanRelocateEHRegions) { if (!BasicBlock::sameEHRegion(block, bNext)) { return false; } } // If there is a switch predecessor don't bother because we'd have to update the uniquesuccs as well // (if they are valid). for (BasicBlock* const predBlock : bNext->PredBlocks()) { if (predBlock->bbJumpKind == BBJ_SWITCH) { return false; } } return true; } //------------------------------------------------------------- // fgCompactBlocks: Compact two blocks into one. 
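// `block` is kept and absorbs the statements, flags, weight and jump kind of `bNext`, which is
// then unlinked from the block list and marked BBF_REMOVED.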
// // Assumes that all necessary checks have been performed, i.e. fgCanCompactBlocks returns true. // // Uses for this function - whenever we change links, insert blocks, ... // It will keep the flowgraph data in synch - bbNum, bbRefs, bbPreds // // Arguments: // block - move all code into this block. // bNext - bbNext of `block`. This block will be removed. // void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) { noway_assert(block != nullptr); noway_assert((block->bbFlags & BBF_REMOVED) == 0); noway_assert(block->bbJumpKind == BBJ_NONE); noway_assert(bNext == block->bbNext); noway_assert(bNext != nullptr); noway_assert((bNext->bbFlags & BBF_REMOVED) == 0); noway_assert(bNext->countOfInEdges() == 1 || block->isEmpty()); noway_assert(bNext->bbPreds); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) noway_assert((bNext->bbFlags & BBF_FINALLY_TARGET) == 0); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Make sure the second block is not the start of a TRY block or an exception handler noway_assert(bNext->bbCatchTyp == BBCT_NONE); noway_assert((bNext->bbFlags & BBF_TRY_BEG) == 0); noway_assert((bNext->bbFlags & BBF_DONT_REMOVE) == 0); /* both or none must have an exception handler */ noway_assert(block->hasTryIndex() == bNext->hasTryIndex()); #ifdef DEBUG if (verbose) { printf("\nCompacting blocks " FMT_BB " and " FMT_BB ":\n", block->bbNum, bNext->bbNum); } #endif if (bNext->countOfInEdges() > 1) { JITDUMP("Second block has multiple incoming edges\n"); assert(block->isEmpty()); for (BasicBlock* const predBlock : bNext->PredBlocks()) { fgReplaceJumpTarget(predBlock, block, bNext); if (predBlock != block) { fgAddRefPred(block, predBlock); } } bNext->bbPreds = nullptr; // `block` can no longer be a loop pre-header (if it was before). block->bbFlags &= ~BBF_LOOP_PREHEADER; } else { noway_assert(bNext->bbPreds->flNext == nullptr); noway_assert(bNext->bbPreds->getBlock() == block); } /* Start compacting - move all the statements in the second block to the first block */ // First move any phi definitions of the second block after the phi defs of the first. // TODO-CQ: This may be the wrong thing to do. If we're compacting blocks, it's because a // control-flow choice was constant-folded away. So probably phi's need to go away, // as well, in favor of one of the incoming branches. Or at least be modified. assert(block->IsLIR() == bNext->IsLIR()); if (block->IsLIR()) { LIR::Range& blockRange = LIR::AsRange(block); LIR::Range& nextRange = LIR::AsRange(bNext); // Does the next block have any phis? GenTree* nextNode = nextRange.FirstNode(); // Does the block have any code? if (nextNode != nullptr) { LIR::Range nextNodes = nextRange.Remove(nextNode, nextRange.LastNode()); blockRange.InsertAtEnd(std::move(nextNodes)); } } else { Statement* blkNonPhi1 = block->FirstNonPhiDef(); Statement* bNextNonPhi1 = bNext->FirstNonPhiDef(); Statement* blkFirst = block->firstStmt(); Statement* bNextFirst = bNext->firstStmt(); // Does the second have any phis? if (bNextFirst != nullptr && bNextFirst != bNextNonPhi1) { Statement* bNextLast = bNextFirst->GetPrevStmt(); assert(bNextLast->GetNextStmt() == nullptr); // Does "blk" have phis? if (blkNonPhi1 != blkFirst) { // Yes, has phis. // Insert after the last phi of "block." // First, bNextPhis after last phi of block. 
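// Roughly (using illustrative groupings, not actual variable names): if block holds
// [blkPhis..., blkNonPhis...] and bNext holds [bNextPhis..., bNextNonPhis...], then after this
// splice block holds [blkPhis..., bNextPhis..., blkNonPhis...] and bNext is left holding only
// [bNextNonPhis...], which the common code further below appends to block.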
Statement* blkLastPhi; if (blkNonPhi1 != nullptr) { blkLastPhi = blkNonPhi1->GetPrevStmt(); } else { blkLastPhi = blkFirst->GetPrevStmt(); } blkLastPhi->SetNextStmt(bNextFirst); bNextFirst->SetPrevStmt(blkLastPhi); // Now, rest of "block" after last phi of "bNext". Statement* bNextLastPhi = nullptr; if (bNextNonPhi1 != nullptr) { bNextLastPhi = bNextNonPhi1->GetPrevStmt(); } else { bNextLastPhi = bNextFirst->GetPrevStmt(); } bNextLastPhi->SetNextStmt(blkNonPhi1); if (blkNonPhi1 != nullptr) { blkNonPhi1->SetPrevStmt(bNextLastPhi); } else { // block has no non phis, so make the last statement be the last added phi. blkFirst->SetPrevStmt(bNextLastPhi); } // Now update the bbStmtList of "bNext". bNext->bbStmtList = bNextNonPhi1; if (bNextNonPhi1 != nullptr) { bNextNonPhi1->SetPrevStmt(bNextLast); } } else { if (blkFirst != nullptr) // If "block" has no statements, fusion will work fine... { // First, bNextPhis at start of block. Statement* blkLast = blkFirst->GetPrevStmt(); block->bbStmtList = bNextFirst; // Now, rest of "block" (if it exists) after last phi of "bNext". Statement* bNextLastPhi = nullptr; if (bNextNonPhi1 != nullptr) { // There is a first non phi, so the last phi is before it. bNextLastPhi = bNextNonPhi1->GetPrevStmt(); } else { // All the statements are phi defns, so the last one is the prev of the first. bNextLastPhi = bNextFirst->GetPrevStmt(); } bNextFirst->SetPrevStmt(blkLast); bNextLastPhi->SetNextStmt(blkFirst); blkFirst->SetPrevStmt(bNextLastPhi); // Now update the bbStmtList of "bNext" bNext->bbStmtList = bNextNonPhi1; if (bNextNonPhi1 != nullptr) { bNextNonPhi1->SetPrevStmt(bNextLast); } } } } // Now proceed with the updated bbTreeLists. Statement* stmtList1 = block->firstStmt(); Statement* stmtList2 = bNext->firstStmt(); /* the block may have an empty list */ if (stmtList1 != nullptr) { Statement* stmtLast1 = block->lastStmt(); /* The second block may be a GOTO statement or something with an empty bbStmtList */ if (stmtList2 != nullptr) { Statement* stmtLast2 = bNext->lastStmt(); /* append list2 to list 1 */ stmtLast1->SetNextStmt(stmtList2); stmtList2->SetPrevStmt(stmtLast1); stmtList1->SetPrevStmt(stmtLast2); } } else { /* block was formerly empty and now has bNext's statements */ block->bbStmtList = stmtList2; } } // If either block or bNext has a profile weight // or if both block and bNext have non-zero weights // then we will use the max weight for the block. // const bool hasProfileWeight = block->hasProfileWeight() || bNext->hasProfileWeight(); const bool hasNonZeroWeight = (block->bbWeight > BB_ZERO_WEIGHT) || (bNext->bbWeight > BB_ZERO_WEIGHT); if (hasProfileWeight || hasNonZeroWeight) { weight_t const newWeight = max(block->bbWeight, bNext->bbWeight); if (hasProfileWeight) { block->setBBProfileWeight(newWeight); } else { assert(newWeight != BB_ZERO_WEIGHT); block->bbWeight = newWeight; block->bbFlags &= ~BBF_RUN_RARELY; } } // otherwise if either block has a zero weight we select the zero weight else { noway_assert((block->bbWeight == BB_ZERO_WEIGHT) || (bNext->bbWeight == BB_ZERO_WEIGHT)); block->bbWeight = BB_ZERO_WEIGHT; block->bbFlags |= BBF_RUN_RARELY; // Set the RarelyRun flag } /* set the right links */ block->bbJumpKind = bNext->bbJumpKind; VarSetOps::AssignAllowUninitRhs(this, block->bbLiveOut, bNext->bbLiveOut); // Update the beginning and ending IL offsets (bbCodeOffs and bbCodeOffsEnd). // Set the beginning IL offset to the minimum, and the ending offset to the maximum, of the respective blocks. 
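// For example, merging blocks with IL ranges [0x10..0x20) and [0x18..0x30) gives [0x10..0x30).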
// If one block has an unknown offset, we take the other block. // We are merging into 'block', so if its values are correct, just leave them alone. // TODO: we should probably base this on the statements within. if (block->bbCodeOffs == BAD_IL_OFFSET) { block->bbCodeOffs = bNext->bbCodeOffs; // If they are both BAD_IL_OFFSET, this doesn't change anything. } else if (bNext->bbCodeOffs != BAD_IL_OFFSET) { // The are both valid offsets; compare them. if (block->bbCodeOffs > bNext->bbCodeOffs) { block->bbCodeOffs = bNext->bbCodeOffs; } } if (block->bbCodeOffsEnd == BAD_IL_OFFSET) { block->bbCodeOffsEnd = bNext->bbCodeOffsEnd; // If they are both BAD_IL_OFFSET, this doesn't change anything. } else if (bNext->bbCodeOffsEnd != BAD_IL_OFFSET) { // The are both valid offsets; compare them. if (block->bbCodeOffsEnd < bNext->bbCodeOffsEnd) { block->bbCodeOffsEnd = bNext->bbCodeOffsEnd; } } if (((block->bbFlags & BBF_INTERNAL) != 0) && ((bNext->bbFlags & BBF_INTERNAL) == 0)) { // If 'block' is an internal block and 'bNext' isn't, then adjust the flags set on 'block'. block->bbFlags &= ~BBF_INTERNAL; // Clear the BBF_INTERNAL flag block->bbFlags |= BBF_IMPORTED; // Set the BBF_IMPORTED flag } /* Update the flags for block with those found in bNext */ block->bbFlags |= (bNext->bbFlags & BBF_COMPACT_UPD); /* mark bNext as removed */ bNext->bbFlags |= BBF_REMOVED; /* Unlink bNext and update all the marker pointers if necessary */ fgUnlinkRange(block->bbNext, bNext); // If bNext was the last block of a try or handler, update the EH table. ehUpdateForDeletedBlock(bNext); /* Set the jump targets */ switch (bNext->bbJumpKind) { case BBJ_CALLFINALLY: // Propagate RETLESS property block->bbFlags |= (bNext->bbFlags & BBF_RETLESS_CALL); FALLTHROUGH; case BBJ_COND: case BBJ_ALWAYS: case BBJ_EHCATCHRET: block->bbJumpDest = bNext->bbJumpDest; /* Update the predecessor list for 'bNext->bbJumpDest' */ fgReplacePred(bNext->bbJumpDest, bNext, block); /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */ if (bNext->bbJumpKind == BBJ_COND && bNext->bbJumpDest != bNext->bbNext) { fgReplacePred(bNext->bbNext, bNext, block); } break; case BBJ_NONE: /* Update the predecessor list for 'bNext->bbNext' */ fgReplacePred(bNext->bbNext, bNext, block); break; case BBJ_EHFILTERRET: fgReplacePred(bNext->bbJumpDest, bNext, block); break; case BBJ_EHFINALLYRET: { unsigned hndIndex = block->getHndIndex(); EHblkDsc* ehDsc = ehGetDsc(hndIndex); if (ehDsc->HasFinallyHandler()) // No need to do this for fault handlers { BasicBlock* begBlk; BasicBlock* endBlk; ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk); BasicBlock* finBeg = ehDsc->ebdHndBeg; for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } noway_assert(bcall->isBBCallAlwaysPair()); fgReplacePred(bcall->bbNext, bNext, block); } } } break; case BBJ_THROW: case BBJ_RETURN: /* no jumps or fall through blocks to set here */ break; case BBJ_SWITCH: block->bbJumpSwt = bNext->bbJumpSwt; // We are moving the switch jump from bNext to block. 
Examine the jump targets // of the BBJ_SWITCH at bNext and replace the predecessor to 'bNext' with ones to 'block' fgChangeSwitchBlock(bNext, block); break; default: noway_assert(!"Unexpected bbJumpKind"); break; } if (bNext->isLoopAlign()) { block->bbFlags |= BBF_LOOP_ALIGN; JITDUMP("Propagating LOOP_ALIGN flag from " FMT_BB " to " FMT_BB " during compacting.\n", bNext->bbNum, block->bbNum); } // If we're collapsing a block created after the dominators are // computed, copy block number the block and reuse dominator // information from bNext to block. // // Note we have to do this renumbering after the full set of pred list // updates above, since those updates rely on stable bbNums; if we renumber // before the updates, we can create pred lists with duplicate m_block->bbNum // values (though different m_blocks). // if (fgDomsComputed && (block->bbNum > fgDomBBcount)) { BlockSetOps::Assign(this, block->bbReach, bNext->bbReach); BlockSetOps::ClearD(this, bNext->bbReach); block->bbIDom = bNext->bbIDom; bNext->bbIDom = nullptr; // In this case, there's no need to update the preorder and postorder numbering // since we're changing the bbNum, this makes the basic block all set. // JITDUMP("Renumbering " FMT_BB " to be " FMT_BB " to preserve dominator information\n", block->bbNum, bNext->bbNum); block->bbNum = bNext->bbNum; // Because we may have reordered pred lists when we swapped in // block for bNext above, we now need to re-reorder pred lists // to reflect the bbNum update. // // This process of reordering and re-reordering could likely be avoided // via a different update strategy. But because it's probably rare, // and we avoid most of the work if pred lists are already in order, // we'll just ensure everything is properly ordered. // for (BasicBlock* const checkBlock : Blocks()) { checkBlock->ensurePredListOrder(this); } } fgUpdateLoopsAfterCompacting(block, bNext); #if DEBUG if (verbose && 0) { printf("\nAfter compacting:\n"); fgDispBasicBlocks(false); } #endif #if DEBUG if (JitConfig.JitSlowDebugChecksEnabled() != 0) { // Make sure that the predecessor lists are accurate fgDebugCheckBBlist(); } #endif // DEBUG } //------------------------------------------------------------- // fgUpdateLoopsAfterCompacting: Update the loop table after block compaction. // // Arguments: // block - target of compaction. // bNext - bbNext of `block`. This block has been removed. // void Compiler::fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext) { /* Check if the removed block is not part the loop table */ noway_assert(bNext); for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { /* Some loops may have been already removed by * loop unrolling or conditional folding */ if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED) { continue; } /* Check the loop head (i.e. 
the block preceding the loop) */ if (optLoopTable[loopNum].lpHead == bNext) { optLoopTable[loopNum].lpHead = block; } /* Check the loop bottom */ if (optLoopTable[loopNum].lpBottom == bNext) { optLoopTable[loopNum].lpBottom = block; } /* Check the loop exit */ if (optLoopTable[loopNum].lpExit == bNext) { noway_assert(optLoopTable[loopNum].lpExitCnt == 1); optLoopTable[loopNum].lpExit = block; } /* Check the loop entry */ if (optLoopTable[loopNum].lpEntry == bNext) { optLoopTable[loopNum].lpEntry = block; } /* Check the loop top */ if (optLoopTable[loopNum].lpTop == bNext) { optLoopTable[loopNum].lpTop = block; } } } //------------------------------------------------------------- // fgUnreachableBlock: Remove a block when it is unreachable. // // This function cannot remove the first block. // // Arguments: // block - unreachable block to remove // void Compiler::fgUnreachableBlock(BasicBlock* block) { // genReturnBB should never be removed, as we might have special hookups there. // Therefore, we should never come here to remove the statements in the genReturnBB block. // For example, the profiler hookup needs to have the "void GT_RETURN" statement // to properly set the info.compProfilerCallback flag. noway_assert(block != genReturnBB); if (block->bbFlags & BBF_REMOVED) { return; } #ifdef DEBUG if (verbose) { printf("\nRemoving unreachable " FMT_BB "\n", block->bbNum); } #endif // DEBUG noway_assert(block->bbPrev != nullptr); // Can't use this function to remove the first block #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) assert(!block->isBBCallAlwaysPairTail()); // can't remove the BBJ_ALWAYS of a BBJ_CALLFINALLY / BBJ_ALWAYS pair #endif // First, delete all the code in the block. if (block->IsLIR()) { LIR::Range& blockRange = LIR::AsRange(block); if (!blockRange.IsEmpty()) { blockRange.Delete(this, block, blockRange.FirstNode(), blockRange.LastNode()); } } else { // TODO-Cleanup: I'm not sure why this happens -- if the block is unreachable, why does it have phis? // Anyway, remove any phis. Statement* firstNonPhi = block->FirstNonPhiDef(); if (block->bbStmtList != firstNonPhi) { if (firstNonPhi != nullptr) { firstNonPhi->SetPrevStmt(block->lastStmt()); } block->bbStmtList = firstNonPhi; } for (Statement* const stmt : block->Statements()) { fgRemoveStmt(block, stmt); } noway_assert(block->bbStmtList == nullptr); } // Next update the loop table and bbWeights optUpdateLoopsBeforeRemoveBlock(block); // Mark the block as removed block->bbFlags |= BBF_REMOVED; // Update bbRefs and bbPreds for the blocks reached by this block fgRemoveBlockAsPred(block); } //------------------------------------------------------------- // fgRemoveConditionalJump: Remove or morph a jump when we jump to the same // block when both the condition is true or false. Remove the branch condition, // but leave any required side effects. // // Arguments: // block - block with conditional branch // void Compiler::fgRemoveConditionalJump(BasicBlock* block) { noway_assert(block->bbJumpKind == BBJ_COND && block->bbJumpDest == block->bbNext); assert(compRationalIRForm == block->IsLIR()); flowList* flow = fgGetPredForBlock(block->bbNext, block); noway_assert(flow->flDupCount == 2); // Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount. 
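// Both the branch edge and the fall-through edge target bbNext, so they are recorded as a
// single pred entry with flDupCount == 2; removing the branch leaves only the fall-through,
// hence the single decrement of bbRefs and flDupCount below.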
block->bbJumpKind = BBJ_NONE; --block->bbNext->bbRefs; --flow->flDupCount; #ifdef DEBUG block->bbJumpDest = nullptr; if (verbose) { printf("Block " FMT_BB " becoming a BBJ_NONE to " FMT_BB " (jump target is the same whether the condition" " is true or false)\n", block->bbNum, block->bbNext->bbNum); } #endif // Remove the block jump condition if (block->IsLIR()) { LIR::Range& blockRange = LIR::AsRange(block); GenTree* test = blockRange.LastNode(); assert(test->OperIsConditionalJump()); bool isClosed; unsigned sideEffects; LIR::ReadOnlyRange testRange = blockRange.GetTreeRange(test, &isClosed, &sideEffects); // TODO-LIR: this should really be checking GTF_ALL_EFFECT, but that produces unacceptable // diffs compared to the existing backend. if (isClosed && ((sideEffects & GTF_SIDE_EFFECT) == 0)) { // If the jump and its operands form a contiguous, side-effect-free range, // remove them. blockRange.Delete(this, block, std::move(testRange)); } else { // Otherwise, just remove the jump node itself. blockRange.Remove(test, true); } } else { Statement* test = block->lastStmt(); GenTree* tree = test->GetRootNode(); noway_assert(tree->gtOper == GT_JTRUE); GenTree* sideEffList = nullptr; if (tree->gtFlags & GTF_SIDE_EFFECT) { gtExtractSideEffList(tree, &sideEffList); if (sideEffList) { noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT); #ifdef DEBUG if (verbose) { printf("Extracted side effects list from condition...\n"); gtDispTree(sideEffList); printf("\n"); } #endif } } // Delete the cond test or replace it with the side effect tree if (sideEffList == nullptr) { fgRemoveStmt(block, test); } else { test->SetRootNode(sideEffList); if (fgStmtListThreaded) { gtSetStmtInfo(test); fgSetStmtSeq(test); } } } } //------------------------------------------------------------- // fgOptimizeBranchToEmptyUnconditional: // Optimize a jump to an empty block which ends in an unconditional branch. // // Arguments: // block - source block // bDest - destination // // Returns: true if changes were made // bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest) { bool optimizeJump = true; assert(bDest->isEmpty()); assert(bDest->bbJumpKind == BBJ_ALWAYS); // We do not optimize jumps between two different try regions. // However jumping to a block that is not in any try region is OK // if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) { optimizeJump = false; } // Don't optimize a jump to a removed block if (bDest->bbJumpDest->bbFlags & BBF_REMOVED) { optimizeJump = false; } // Don't optimize a jump to a cloned finally if (bDest->bbFlags & BBF_CLONED_FINALLY_BEGIN) { optimizeJump = false; } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't optimize a jump to a finally target. For BB1->BB2->BB3, where // BB2 is a finally target, if we changed BB1 to jump directly to BB3, // it would skip the finally target. BB1 might be a BBJ_ALWAYS block part // of a BBJ_CALLFINALLY/BBJ_ALWAYS pair, so changing the finally target // would change the unwind behavior. 
if (bDest->bbFlags & BBF_FINALLY_TARGET) { optimizeJump = false; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Must optimize jump if bDest has been removed // if (bDest->bbFlags & BBF_REMOVED) { optimizeJump = true; } // If we are optimizing using real profile weights // then don't optimize a conditional jump to an unconditional jump // until after we have computed the edge weights // if (fgIsUsingProfileWeights() && !fgEdgeWeightsComputed) { fgNeedsUpdateFlowGraph = true; optimizeJump = false; } if (optimizeJump) { #ifdef DEBUG if (verbose) { printf("\nOptimizing a jump to an unconditional jump (" FMT_BB " -> " FMT_BB " -> " FMT_BB ")\n", block->bbNum, bDest->bbNum, bDest->bbJumpDest->bbNum); } #endif // DEBUG // // When we optimize a branch to branch we need to update the profile weight // of bDest by subtracting out the block/edge weight of the path that is being optimized. // if (fgHaveValidEdgeWeights && bDest->hasProfileWeight()) { flowList* edge1 = fgGetPredForBlock(bDest, block); noway_assert(edge1 != nullptr); weight_t edgeWeight; if (edge1->edgeWeightMin() != edge1->edgeWeightMax()) { // // We only have an estimate for the edge weight // edgeWeight = (edge1->edgeWeightMin() + edge1->edgeWeightMax()) / 2; // // Clear the profile weight flag // bDest->bbFlags &= ~BBF_PROF_WEIGHT; } else { // // We only have the exact edge weight // edgeWeight = edge1->edgeWeightMin(); } // // Update the bDest->bbWeight // if (bDest->bbWeight > edgeWeight) { bDest->bbWeight -= edgeWeight; } else { bDest->bbWeight = BB_ZERO_WEIGHT; bDest->bbFlags |= BBF_RUN_RARELY; // Set the RarelyRun flag } flowList* edge2 = fgGetPredForBlock(bDest->bbJumpDest, bDest); if (edge2 != nullptr) { // // Update the edge2 min/max weights // weight_t newEdge2Min; weight_t newEdge2Max; if (edge2->edgeWeightMin() > edge1->edgeWeightMin()) { newEdge2Min = edge2->edgeWeightMin() - edge1->edgeWeightMin(); } else { newEdge2Min = BB_ZERO_WEIGHT; } if (edge2->edgeWeightMax() > edge1->edgeWeightMin()) { newEdge2Max = edge2->edgeWeightMax() - edge1->edgeWeightMin(); } else { newEdge2Max = BB_ZERO_WEIGHT; } edge2->setEdgeWeights(newEdge2Min, newEdge2Max, bDest); } } // Optimize the JUMP to empty unconditional JUMP to go to the new target block->bbJumpDest = bDest->bbJumpDest; fgAddRefPred(bDest->bbJumpDest, block, fgRemoveRefPred(bDest, block)); return true; } return false; } //------------------------------------------------------------- // fgOptimizeEmptyBlock: // Does flow optimization of an empty block (can remove it in some cases) // // Arguments: // block - an empty block // // Returns: true if changes were made // bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { assert(block->isEmpty()); BasicBlock* bPrev = block->bbPrev; switch (block->bbJumpKind) { case BBJ_COND: case BBJ_SWITCH: /* can never happen */ noway_assert(!"Conditional or switch block with empty body!"); break; case BBJ_THROW: case BBJ_CALLFINALLY: case BBJ_RETURN: case BBJ_EHCATCHRET: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: /* leave them as is */ /* some compilers generate multiple returns and put all of them at the end - * to solve that we need the predecessor list */ break; case BBJ_ALWAYS: // A GOTO cannot be to the next block since that // should have been fixed by the optimization above // An exception is made for a jump from Hot to Cold noway_assert(block->bbJumpDest != block->bbNext || block->isBBCallAlwaysPairTail() || fgInDifferentRegions(block, block->bbNext)); /* Cannot remove the first BB */ if (!bPrev) { break; } /* 
Do not remove a block that jumps to itself - used for while (true){} */ if (block->bbJumpDest == block) { break; } /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ if (bPrev->bbJumpKind != BBJ_NONE) { break; } // can't allow fall through into cold code if (block->bbNext == fgFirstColdBlock) { break; } /* Can fall through since this is similar with removing * a BBJ_NONE block, only the successor is different */ FALLTHROUGH; case BBJ_NONE: /* special case if this is the first BB */ if (!bPrev) { assert(block == fgFirstBB); } else { /* If this block follows a BBJ_CALLFINALLY do not remove it * (because we don't know who may jump to it) */ if (bPrev->bbJumpKind == BBJ_CALLFINALLY) { break; } } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) /* Don't remove finally targets */ if (block->bbFlags & BBF_FINALLY_TARGET) break; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #if defined(FEATURE_EH_FUNCLETS) /* Don't remove an empty block that is in a different EH region * from its successor block, if the block is the target of a * catch return. It is required that the return address of a * catch be in the correct EH region, for re-raise of thread * abort exceptions to work. Insert a NOP in the empty block * to ensure we generate code for the block, if we keep it. */ { BasicBlock* succBlock; if (block->bbJumpKind == BBJ_ALWAYS) { succBlock = block->bbJumpDest; } else { succBlock = block->bbNext; } if ((succBlock != nullptr) && !BasicBlock::sameEHRegion(block, succBlock)) { // The empty block and the block that follows it are in different // EH regions. Is this a case where they can't be merged? bool okToMerge = true; // assume it's ok for (BasicBlock* const predBlock : block->PredBlocks()) { if (predBlock->bbJumpKind == BBJ_EHCATCHRET) { assert(predBlock->bbJumpDest == block); okToMerge = false; // we can't get rid of the empty block break; } } if (!okToMerge) { // Insert a NOP in the empty block to ensure we generate code // for the catchret target in the right EH region. GenTree* nop = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); if (block->IsLIR()) { LIR::AsRange(block).InsertAtEnd(nop); LIR::ReadOnlyRange range(nop, nop); m_pLowering->LowerRange(block, range); } else { Statement* nopStmt = fgNewStmtAtEnd(block, nop); fgSetStmtSeq(nopStmt); gtSetStmtInfo(nopStmt); } #ifdef DEBUG if (verbose) { printf("\nKeeping empty block " FMT_BB " - it is the target of a catch return\n", block->bbNum); } #endif // DEBUG break; // go to the next block } } } #endif // FEATURE_EH_FUNCLETS if (!ehCanDeleteEmptyBlock(block)) { // We're not allowed to remove this block due to reasons related to the EH table. break; } /* special case if this is the last BB */ if (block == fgLastBB) { if (!bPrev) { break; } fgLastBB = bPrev; } // When using profile weights, fgComputeEdgeWeights expects the first non-internal block to have profile // weight. // Make sure we don't break that invariant. if (fgIsUsingProfileWeights() && block->hasProfileWeight() && (block->bbFlags & BBF_INTERNAL) == 0) { BasicBlock* bNext = block->bbNext; // Check if the next block can't maintain the invariant. if ((bNext == nullptr) || ((bNext->bbFlags & BBF_INTERNAL) != 0) || !bNext->hasProfileWeight()) { // Check if the current block is the first non-internal block. BasicBlock* curBB = bPrev; while ((curBB != nullptr) && (curBB->bbFlags & BBF_INTERNAL) != 0) { curBB = curBB->bbPrev; } if (curBB == nullptr) { // This block is the first non-internal block and it has profile weight. // Don't delete it. 
break; } } } /* Remove the block */ compCurBB = block; fgRemoveBlock(block, /* unreachable */ false); return true; default: noway_assert(!"Unexpected bbJumpKind"); break; } return false; } //------------------------------------------------------------- // fgOptimizeSwitchBranches: // Does flow optimization for a switch - bypasses jumps to empty unconditional branches, // and transforms degenerate switch cases like those with 1 or 2 targets. // // Arguments: // block - block with switch // // Returns: true if changes were made // bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) { assert(block->bbJumpKind == BBJ_SWITCH); unsigned jmpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jmpTab = block->bbJumpSwt->bbsDstTab; BasicBlock* bNewDest; // the new jump target for the current switch case BasicBlock* bDest; bool returnvalue = false; do { REPEAT_SWITCH:; bDest = *jmpTab; bNewDest = bDest; // Do we have a JUMP to an empty unconditional JUMP block? if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { bool optimizeJump = true; // We do not optimize jumps between two different try regions. // However jumping to a block that is not in any try region is OK // if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) { optimizeJump = false; } // If we are optimize using real profile weights // then don't optimize a switch jump to an unconditional jump // until after we have computed the edge weights // if (fgIsUsingProfileWeights() && !fgEdgeWeightsComputed) { fgNeedsUpdateFlowGraph = true; optimizeJump = false; } if (optimizeJump) { bNewDest = bDest->bbJumpDest; #ifdef DEBUG if (verbose) { printf("\nOptimizing a switch jump to an empty block with an unconditional jump (" FMT_BB " -> " FMT_BB " -> " FMT_BB ")\n", block->bbNum, bDest->bbNum, bNewDest->bbNum); } #endif // DEBUG } } if (bNewDest != bDest) { // // When we optimize a branch to branch we need to update the profile weight // of bDest by subtracting out the block/edge weight of the path that is being optimized. // if (fgIsUsingProfileWeights() && bDest->hasProfileWeight()) { if (fgHaveValidEdgeWeights) { flowList* edge = fgGetPredForBlock(bDest, block); weight_t branchThroughWeight = edge->edgeWeightMin(); if (bDest->bbWeight > branchThroughWeight) { bDest->bbWeight -= branchThroughWeight; } else { bDest->bbWeight = BB_ZERO_WEIGHT; bDest->bbFlags |= BBF_RUN_RARELY; } } } // Update the switch jump table *jmpTab = bNewDest; // Maintain, if necessary, the set of unique targets of "block." UpdateSwitchTableTarget(block, bDest, bNewDest); fgAddRefPred(bNewDest, block, fgRemoveRefPred(bDest, block)); // we optimized a Switch label - goto REPEAT_SWITCH to follow this new jump returnvalue = true; goto REPEAT_SWITCH; } } while (++jmpTab, --jmpCnt); Statement* switchStmt = nullptr; LIR::Range* blockRange = nullptr; GenTree* switchTree; if (block->IsLIR()) { blockRange = &LIR::AsRange(block); switchTree = blockRange->LastNode(); assert(switchTree->OperGet() == GT_SWITCH_TABLE); } else { switchStmt = block->lastStmt(); switchTree = switchStmt->GetRootNode(); assert(switchTree->OperGet() == GT_SWITCH); } noway_assert(switchTree->gtType == TYP_VOID); // At this point all of the case jump targets have been updated such // that none of them go to block that is an empty unconditional block // jmpTab = block->bbJumpSwt->bbsDstTab; jmpCnt = block->bbJumpSwt->bbsCount; // Now check for two trivial switch jumps. 
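// (1) every case collapses to one unique successor         -> convert to BBJ_ALWAYS
// (2) two targets only, and the second (default) is bbNext -> convert to BBJ_COND on switchVal == 0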
// if (block->NumSucc(this) == 1) { // Use BBJ_ALWAYS for a switch with only a default clause, or with only one unique successor. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("\nRemoving a switch jump with a single target (" FMT_BB ")\n", block->bbNum); printf("BEFORE:\n"); } #endif // DEBUG if (block->IsLIR()) { bool isClosed; unsigned sideEffects; LIR::ReadOnlyRange switchTreeRange = blockRange->GetTreeRange(switchTree, &isClosed, &sideEffects); // The switch tree should form a contiguous, side-effect free range by construction. See // Lowering::LowerSwitch for details. assert(isClosed); assert((sideEffects & GTF_ALL_EFFECT) == 0); blockRange->Delete(this, block, std::move(switchTreeRange)); } else { /* check for SIDE_EFFECTS */ if (switchTree->gtFlags & GTF_SIDE_EFFECT) { /* Extract the side effects from the conditional */ GenTree* sideEffList = nullptr; gtExtractSideEffList(switchTree, &sideEffList); if (sideEffList == nullptr) { goto NO_SWITCH_SIDE_EFFECT; } noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT); #ifdef DEBUG if (verbose) { printf("\nSwitch expression has side effects! Extracting side effects...\n"); gtDispTree(switchTree); printf("\n"); gtDispTree(sideEffList); printf("\n"); } #endif // DEBUG /* Replace the conditional statement with the list of side effects */ noway_assert(sideEffList->gtOper != GT_SWITCH); switchStmt->SetRootNode(sideEffList); if (fgStmtListThreaded) { compCurBB = block; /* Update ordering, costs, FP levels, etc. */ gtSetStmtInfo(switchStmt); /* Re-link the nodes for this statement */ fgSetStmtSeq(switchStmt); } } else { NO_SWITCH_SIDE_EFFECT: /* conditional has NO side effect - remove it */ fgRemoveStmt(block, switchStmt); } } // Change the switch jump into a BBJ_ALWAYS block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; block->bbJumpKind = BBJ_ALWAYS; if (jmpCnt > 1) { for (unsigned i = 1; i < jmpCnt; ++i) { (void)fgRemoveRefPred(jmpTab[i], block); } } return true; } else if (block->bbJumpSwt->bbsCount == 2 && block->bbJumpSwt->bbsDstTab[1] == block->bbNext) { /* Use a BBJ_COND(switchVal==0) for a switch with only one significant clause besides the default clause, if the default clause is bbNext */ GenTree* switchVal = switchTree->AsOp()->gtOp1; noway_assert(genActualTypeIsIntOrI(switchVal->TypeGet())); // If we are in LIR, remove the jump table from the block. if (block->IsLIR()) { GenTree* jumpTable = switchTree->AsOp()->gtOp2; assert(jumpTable->OperGet() == GT_JMPTABLE); blockRange->Remove(jumpTable); } // Change the GT_SWITCH(switchVal) into GT_JTRUE(GT_EQ(switchVal==0)). // Also mark the node as GTF_DONT_CSE as further down JIT is not capable of handling it. // For example CSE could determine that the expression rooted at GT_EQ is a candidate cse and // replace it with a COMMA node. In such a case we will end up with GT_JTRUE node pointing to // a COMMA node which results in noway asserts in fgMorphSmpOp(), optAssertionGen() and rpPredictTreeRegUse(). // For the same reason fgMorphSmpOp() marks GT_JTRUE nodes with RELOP children as GTF_DONT_CSE. 
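        // For illustration (a sketch only, operand layout simplified), the rewrite performed below is:
        //
        //    before:   GT_SWITCH                 after:   GT_JTRUE
        //                  |                                  |
        //              switchVal                            GT_EQ   (GTF_RELOP_JMP_USED | GTF_DONT_CSE)
        //                                                    /  \.
        //                                            switchVal   0  (zero constant of switchVal's actual type)
        //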
CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("\nConverting a switch (" FMT_BB ") with only one significant clause besides a default target to a " "conditional branch\n", block->bbNum); } #endif // DEBUG switchTree->ChangeOper(GT_JTRUE); GenTree* zeroConstNode = gtNewZeroConNode(genActualType(switchVal->TypeGet())); GenTree* condNode = gtNewOperNode(GT_EQ, TYP_INT, switchVal, zeroConstNode); switchTree->AsOp()->gtOp1 = condNode; switchTree->AsOp()->gtOp1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE); if (block->IsLIR()) { blockRange->InsertAfter(switchVal, zeroConstNode, condNode); LIR::ReadOnlyRange range(zeroConstNode, switchTree); m_pLowering->LowerRange(block, range); } else if (fgStmtListThreaded) { gtSetStmtInfo(switchStmt); fgSetStmtSeq(switchStmt); } block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; block->bbJumpKind = BBJ_COND; return true; } return returnvalue; } //------------------------------------------------------------- // fgBlockEndFavorsTailDuplication: // Heuristic function that returns true if this block ends in a statement that looks favorable // for tail-duplicating its successor (such as assigning a constant to a local). // // Arguments: // block: BasicBlock we are considering duplicating the successor of // lclNum: local that is used by the successor block, provided by // prior call to fgBlockIsGoodTailDuplicationCandidate // // Returns: // true if block end is favorable for tail duplication // // Notes: // This is the second half of the evaluation for tail duplication, where we try // to determine if this predecessor block assigns a constant or provides useful // information about a local that is tested in an unconditionally executed successor. // If so then duplicating the successor will likely allow the test to be // optimized away. // bool Compiler::fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum) { if (block->isRunRarely()) { return false; } // If the local is address exposed, we currently can't optimize. // LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (lclDsc->IsAddressExposed()) { return false; } Statement* const lastStmt = block->lastStmt(); Statement* const firstStmt = block->FirstNonPhiDef(); if (lastStmt == nullptr) { return false; } // Tail duplication tends to pay off when the last statement // is an assignment of a constant, arraylength, or a relop. // This is because these statements produce information about values // that would otherwise be lost at the upcoming merge point. // // Check up to N statements... // const int limit = 2; int count = 0; Statement* stmt = lastStmt; while (count < limit) { count++; GenTree* const tree = stmt->GetRootNode(); if (tree->OperIs(GT_ASG) && !tree->OperIsBlkOp()) { GenTree* const op1 = tree->AsOp()->gtOp1; if (op1->IsLocal()) { const unsigned op1LclNum = op1->AsLclVarCommon()->GetLclNum(); if (op1LclNum == lclNum) { GenTree* const op2 = tree->AsOp()->gtOp2; if (op2->OperIs(GT_ARR_LENGTH) || op2->OperIsConst() || op2->OperIsCompare()) { return true; } } } } Statement* const prevStmt = stmt->GetPrevStmt(); // The statement list prev links wrap from first->last, so exit // when we see lastStmt again, as we've now seen all statements. // if (prevStmt == lastStmt) { break; } stmt = prevStmt; } return false; } //------------------------------------------------------------- // fgBlockIsGoodTailDuplicationCandidate: // Heuristic function that examines a block (presumably one that is a merge point) to determine // if it is a good candidate to be duplicated. 
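//    For example (hypothetical IR), a target block of the form
//
//        STMT: ASG(V01, ADD(V01, 1))     (optional: one assignment to the tested local,
//                                         with a binary rhs of locals/constants)
//        STMT: JTRUE(EQ(V01, 0))         (a simple compare of that local against a constant)
//
//    is considered a good candidate: a predecessor that assigns a constant to V01
//    often allows the compare to be folded away once this block is duplicated into it.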
// // Arguments: // target - the tail block (candidate for duplication) // // Returns: // true if this is a good candidate, false otherwise // if true, lclNum is set to lcl to scan for in predecessor block // // Notes: // The current heuristic is that tail duplication is deemed favorable if this // block simply tests the value of a local against a constant or some other local. // // This is the first half of the evaluation for tail duplication. We subsequently // need to check if predecessors of this block assigns a constant to the local. // bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock* target, unsigned* lclNum) { *lclNum = BAD_VAR_NUM; // Here we are looking for small blocks where a local live-into the block // ultimately feeds a simple conditional branch. // // These blocks are small, and when duplicated onto the tail of blocks that end in // assignments, there is a high probability of the branch completely going away. // // This is by no means the only kind of tail that it is beneficial to duplicate, // just the only one we recognize for now. if (target->bbJumpKind != BBJ_COND) { return false; } // No point duplicating this block if it's not a control flow join. if (target->bbRefs < 2) { return false; } Statement* const lastStmt = target->lastStmt(); Statement* const firstStmt = target->FirstNonPhiDef(); // We currently allow just one statement aside from the branch. // if ((firstStmt != lastStmt) && (firstStmt != lastStmt->GetPrevStmt())) { return false; } // Verify the branch is just a simple local compare. // GenTree* const lastTree = lastStmt->GetRootNode(); if (lastTree->gtOper != GT_JTRUE) { return false; } // must be some kind of relational operator GenTree* const cond = lastTree->AsOp()->gtOp1; if (!cond->OperIsCompare()) { return false; } // op1 must be some combinations of casts of local or constant GenTree* op1 = cond->AsOp()->gtOp1; while (op1->gtOper == GT_CAST) { op1 = op1->AsOp()->gtOp1; } if (!op1->IsLocal() && !op1->OperIsConst()) { return false; } // op2 must be some combinations of casts of local or constant GenTree* op2 = cond->AsOp()->gtOp2; while (op2->gtOper == GT_CAST) { op2 = op2->AsOp()->gtOp1; } if (!op2->IsLocal() && !op2->OperIsConst()) { return false; } // Tree must have one constant and one local, or be comparing // the same local to itself. unsigned lcl1 = BAD_VAR_NUM; unsigned lcl2 = BAD_VAR_NUM; if (op1->IsLocal()) { lcl1 = op1->AsLclVarCommon()->GetLclNum(); } if (op2->IsLocal()) { lcl2 = op2->AsLclVarCommon()->GetLclNum(); } if ((lcl1 != BAD_VAR_NUM) && op2->OperIsConst()) { *lclNum = lcl1; } else if ((lcl2 != BAD_VAR_NUM) && op1->OperIsConst()) { *lclNum = lcl2; } else if ((lcl1 != BAD_VAR_NUM) && (lcl1 == lcl2)) { *lclNum = lcl1; } else { return false; } // If there's no second statement, we're good. // if (firstStmt == lastStmt) { return true; } // Otherwise check the first stmt. // Verify the branch is just a simple local compare. // GenTree* const firstTree = firstStmt->GetRootNode(); if (firstTree->gtOper != GT_ASG) { return false; } GenTree* const lhs = firstTree->AsOp()->gtOp1; if (!lhs->OperIs(GT_LCL_VAR)) { return false; } const unsigned lhsLcl = lhs->AsLclVarCommon()->GetLclNum(); if (lhsLcl != *lclNum) { return false; } // Could allow unary here too... 
// GenTree* const rhs = firstTree->AsOp()->gtOp2; if (!rhs->OperIsBinary()) { return false; } // op1 must be some combinations of casts of local or constant // (or unary) op1 = rhs->AsOp()->gtOp1; while (op1->gtOper == GT_CAST) { op1 = op1->AsOp()->gtOp1; } if (!op1->IsLocal() && !op1->OperIsConst()) { return false; } // op2 must be some combinations of casts of local or constant // (or unary) op2 = rhs->AsOp()->gtOp2; // A binop may not actually have an op2. // if (op2 == nullptr) { return false; } while (op2->gtOper == GT_CAST) { op2 = op2->AsOp()->gtOp1; } if (!op2->IsLocal() && !op2->OperIsConst()) { return false; } // Tree must have one constant and one local, or be comparing // the same local to itself. lcl1 = BAD_VAR_NUM; lcl2 = BAD_VAR_NUM; if (op1->IsLocal()) { lcl1 = op1->AsLclVarCommon()->GetLclNum(); } if (op2->IsLocal()) { lcl2 = op2->AsLclVarCommon()->GetLclNum(); } if ((lcl1 != BAD_VAR_NUM) && op2->OperIsConst()) { *lclNum = lcl1; } else if ((lcl2 != BAD_VAR_NUM) && op1->OperIsConst()) { *lclNum = lcl2; } else if ((lcl1 != BAD_VAR_NUM) && (lcl1 == lcl2)) { *lclNum = lcl1; } else { return false; } return true; } //------------------------------------------------------------- // fgOptimizeUncondBranchToSimpleCond: // For a block which has an unconditional branch, look to see if its target block // is a good candidate for tail duplication, and if so do that duplication. // // Arguments: // block - block with uncond branch // target - block which is target of first block // // Returns: true if changes were made // // Notes: // This optimization generally reduces code size and path length. // bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target) { JITDUMP("Considering uncond to cond " FMT_BB " -> " FMT_BB "\n", block->bbNum, target->bbNum); if (!BasicBlock::sameEHRegion(block, target)) { return false; } if (fgBBisScratch(block)) { return false; } unsigned lclNum = BAD_VAR_NUM; // First check if the successor tests a local and then branches on the result // of a test, and obtain the local if so. // if (!fgBlockIsGoodTailDuplicationCandidate(target, &lclNum)) { return false; } // See if this block assigns constant or other interesting tree to that same local. // if (!fgBlockEndFavorsTailDuplication(block, lclNum)) { return false; } // NOTE: we do not currently hit this assert because this function is only called when // `fgUpdateFlowGraph` has been called with `doTailDuplication` set to true, and the // backend always calls `fgUpdateFlowGraph` with `doTailDuplication` set to false. 
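    // Illustrative flow shape (a sketch, not tied to any particular method):
    //
    //   before:   block (BBJ_ALWAYS) --> target (BBJ_COND) --> target->bbJumpDest
    //                                           \--> target->bbNext
    //
    //   after:    block (BBJ_COND, with a copy of target's statements) --> target->bbJumpDest
    //                 \--> next (new BBJ_ALWAYS) --> target->bbNext
    //
    // 'target' itself stays in place for its other predecessors; 'block' simply no
    // longer flows through it.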
assert(!block->IsLIR()); // Duplicate the target block at the end of this block // for (Statement* stmt : target->NonPhiStatements()) { GenTree* clone = gtCloneExpr(stmt->GetRootNode()); noway_assert(clone); Statement* cloneStmt = gtNewStmt(clone); if (fgStmtListThreaded) { gtSetStmtInfo(cloneStmt); } fgInsertStmtAtEnd(block, cloneStmt); } // Fix up block's flow // block->bbJumpKind = BBJ_COND; block->bbJumpDest = target->bbJumpDest; fgAddRefPred(block->bbJumpDest, block); fgRemoveRefPred(target, block); // add an unconditional block after this block to jump to the target block's fallthrough block // BasicBlock* next = fgNewBBafter(BBJ_ALWAYS, block, true); // The new block 'next' will inherit its weight from 'block' // next->inheritWeight(block); next->bbJumpDest = target->bbNext; fgAddRefPred(next, block); fgAddRefPred(next->bbJumpDest, next); JITDUMP("fgOptimizeUncondBranchToSimpleCond(from " FMT_BB " to cond " FMT_BB "), created new uncond " FMT_BB "\n", block->bbNum, target->bbNum, next->bbNum); JITDUMP(" expecting opts to key off V%02u in " FMT_BB "\n", lclNum, block->bbNum); return true; } //------------------------------------------------------------- // fgOptimizeBranchToNext: // Optimize a block which has a branch to the following block // // Arguments: // block - block with a branch // bNext - block which is both next and the target of the first block // bPrev - block which is prior to the first block // // Returns: true if changes were made // bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev) { assert(block->KindIs(BBJ_COND, BBJ_ALWAYS)); assert(block->bbJumpDest == bNext); assert(block->bbNext == bNext); assert(block->bbPrev == bPrev); if (block->bbJumpKind == BBJ_ALWAYS) { // We can't remove it if it is a branch from hot => cold if (!fgInDifferentRegions(block, bNext)) { // We can't remove if it is marked as BBF_KEEP_BBJ_ALWAYS if (!(block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // We can't remove if the BBJ_ALWAYS is part of a BBJ_CALLFINALLY pair if (!block->isBBCallAlwaysPairTail()) { /* the unconditional jump is to the next BB */ block->bbJumpKind = BBJ_NONE; #ifdef DEBUG if (verbose) { printf("\nRemoving unconditional jump to next block (" FMT_BB " -> " FMT_BB ") (converted " FMT_BB " to " "fall-through)\n", block->bbNum, bNext->bbNum, block->bbNum); } #endif // DEBUG return true; } } } } else { /* remove the conditional statement at the end of block */ noway_assert(block->bbJumpKind == BBJ_COND); noway_assert(block->isValid()); #ifdef DEBUG if (verbose) { printf("\nRemoving conditional jump to next block (" FMT_BB " -> " FMT_BB ")\n", block->bbNum, bNext->bbNum); } #endif // DEBUG if (block->IsLIR()) { LIR::Range& blockRange = LIR::AsRange(block); GenTree* jmp = blockRange.LastNode(); assert(jmp->OperIsConditionalJump()); if (jmp->OperGet() == GT_JTRUE) { jmp->AsOp()->gtOp1->gtFlags &= ~GTF_SET_FLAGS; } bool isClosed; unsigned sideEffects; LIR::ReadOnlyRange jmpRange = blockRange.GetTreeRange(jmp, &isClosed, &sideEffects); // TODO-LIR: this should really be checking GTF_ALL_EFFECT, but that produces unacceptable // diffs compared to the existing backend. if (isClosed && ((sideEffects & GTF_SIDE_EFFECT) == 0)) { // If the jump and its operands form a contiguous, side-effect-free range, // remove them. blockRange.Delete(this, block, std::move(jmpRange)); } else { // Otherwise, just remove the jump node itself. 
blockRange.Remove(jmp, true); } } else { Statement* condStmt = block->lastStmt(); GenTree* cond = condStmt->GetRootNode(); noway_assert(cond->gtOper == GT_JTRUE); /* check for SIDE_EFFECTS */ if (cond->gtFlags & GTF_SIDE_EFFECT) { /* Extract the side effects from the conditional */ GenTree* sideEffList = nullptr; gtExtractSideEffList(cond, &sideEffList); if (sideEffList == nullptr) { compCurBB = block; fgRemoveStmt(block, condStmt); } else { noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT); #ifdef DEBUG if (verbose) { printf("\nConditional has side effects! Extracting side effects...\n"); gtDispTree(cond); printf("\n"); gtDispTree(sideEffList); printf("\n"); } #endif // DEBUG /* Replace the conditional statement with the list of side effects */ noway_assert(sideEffList->gtOper != GT_JTRUE); condStmt->SetRootNode(sideEffList); if (fgStmtListThreaded) { compCurBB = block; /* Update ordering, costs, FP levels, etc. */ gtSetStmtInfo(condStmt); /* Re-link the nodes for this statement */ fgSetStmtSeq(condStmt); } } } else { compCurBB = block; /* conditional has NO side effect - remove it */ fgRemoveStmt(block, condStmt); } } /* Conditional is gone - simply fall into the next block */ block->bbJumpKind = BBJ_NONE; /* Update bbRefs and bbNum - Conditional predecessors to the same * block are counted twice so we have to remove one of them */ noway_assert(bNext->countOfInEdges() > 1); fgRemoveRefPred(bNext, block); return true; } return false; } //------------------------------------------------------------- // fgOptimizeBranch: Optimize an unconditional branch that branches to a conditional branch. // // Currently we require that the conditional branch jump back to the block that follows the unconditional // branch. We can improve the code execution and layout by concatenating a copy of the conditional branch // block at the end of the conditional branch and reversing the sense of the branch. // // This is only done when the amount of code to be copied is smaller than our calculated threshold // in maxDupCostSz. // // Arguments: // bJump - block with branch // // Returns: true if changes were made // bool Compiler::fgOptimizeBranch(BasicBlock* bJump) { if (opts.MinOpts()) { return false; } if (bJump->bbJumpKind != BBJ_ALWAYS) { return false; } if (bJump->bbFlags & BBF_KEEP_BBJ_ALWAYS) { return false; } // Don't hoist a conditional branch into the scratch block; we'd prefer it stay // either BBJ_NONE or BBJ_ALWAYS. if (fgBBisScratch(bJump)) { return false; } BasicBlock* bDest = bJump->bbJumpDest; if (bDest->bbJumpKind != BBJ_COND) { return false; } if (bDest->bbJumpDest != bJump->bbNext) { return false; } // 'bJump' must be in the same try region as the condition, since we're going to insert // a duplicated condition in 'bJump', and the condition might include exception throwing code. if (!BasicBlock::sameTryRegion(bJump, bDest)) { return false; } // do not jump into another try region BasicBlock* bDestNext = bDest->bbNext; if (bDestNext->hasTryIndex() && !BasicBlock::sameTryRegion(bJump, bDestNext)) { return false; } // This function is only called by fgReorderBlocks, which we do not run in the backend. // If we wanted to run block reordering in the backend, we would need to be able to // calculate cost information for LIR on a per-node basis in order for this function // to work. assert(!bJump->IsLIR()); assert(!bDest->IsLIR()); unsigned estDupCostSz = 0; for (Statement* const stmt : bDest->Statements()) { // We want to compute the costs of the statement. 
Unfortunately, gtPrepareCost() / gtSetStmtInfo() // call gtSetEvalOrder(), which can reorder nodes. If it does so, we need to re-thread the gtNext/gtPrev // links. We don't know if it does or doesn't reorder nodes, so we end up always re-threading the links. gtSetStmtInfo(stmt); if (fgStmtListThreaded) { fgSetStmtSeq(stmt); } GenTree* expr = stmt->GetRootNode(); estDupCostSz += expr->GetCostSz(); } bool allProfileWeightsAreValid = false; weight_t weightJump = bJump->bbWeight; weight_t weightDest = bDest->bbWeight; weight_t weightNext = bJump->bbNext->bbWeight; bool rareJump = bJump->isRunRarely(); bool rareDest = bDest->isRunRarely(); bool rareNext = bJump->bbNext->isRunRarely(); // If we have profile data then we calculate the number of time // the loop will iterate into loopIterations if (fgIsUsingProfileWeights()) { // Only rely upon the profile weight when all three of these blocks // have either good profile weights or are rarelyRun // if ((bJump->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) && (bDest->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) && (bJump->bbNext->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY))) { allProfileWeightsAreValid = true; if ((weightJump * 100) < weightDest) { rareJump = true; } if ((weightNext * 100) < weightDest) { rareNext = true; } if (((weightDest * 100) < weightJump) && ((weightDest * 100) < weightNext)) { rareDest = true; } } } unsigned maxDupCostSz = 6; // // Branches between the hot and rarely run regions // should be minimized. So we allow a larger size // if (rareDest != rareJump) { maxDupCostSz += 6; } if (rareDest != rareNext) { maxDupCostSz += 6; } // // We we are ngen-ing: // If the uncondional branch is a rarely run block then // we are willing to have more code expansion since we // won't be running code from this page // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (rareJump) { maxDupCostSz *= 2; } } // If the compare has too high cost then we don't want to dup bool costIsTooHigh = (estDupCostSz > maxDupCostSz); #ifdef DEBUG if (verbose) { printf("\nDuplication of the conditional block " FMT_BB " (always branch from " FMT_BB ") %s, because the cost of duplication (%i) is %s than %i, validProfileWeights = %s\n", bDest->bbNum, bJump->bbNum, costIsTooHigh ? "not done" : "performed", estDupCostSz, costIsTooHigh ? "greater" : "less or equal", maxDupCostSz, allProfileWeightsAreValid ? "true" : "false"); } #endif // DEBUG if (costIsTooHigh) { return false; } /* Looks good - duplicate the conditional block */ Statement* newStmtList = nullptr; // new stmt list to be added to bJump Statement* newLastStmt = nullptr; /* Visit all the statements in bDest */ for (Statement* const curStmt : bDest->Statements()) { // Clone/substitute the expression. Statement* stmt = gtCloneStmt(curStmt); // cloneExpr doesn't handle everything. if (stmt == nullptr) { return false; } if (fgStmtListThreaded) { gtSetStmtInfo(stmt); fgSetStmtSeq(stmt); } /* Append the expression to our list */ if (newStmtList != nullptr) { newLastStmt->SetNextStmt(stmt); } else { newStmtList = stmt; } stmt->SetPrevStmt(newLastStmt); newLastStmt = stmt; } // Get to the condition node from the statement tree. GenTree* condTree = newLastStmt->GetRootNode(); noway_assert(condTree->gtOper == GT_JTRUE); // Set condTree to the operand to the GT_JTRUE. condTree = condTree->AsOp()->gtOp1; // This condTree has to be a RelOp comparison. if (condTree->OperIsCompare() == false) { return false; } // Join the two linked lists. 
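    // (Reminder of the statement-list convention relied on below: the 'next' links are
    // nullptr-terminated, while the 'prev' link of a block's first statement wraps
    // around to its last statement.)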
Statement* lastStmt = bJump->lastStmt(); if (lastStmt != nullptr) { Statement* stmt = bJump->firstStmt(); stmt->SetPrevStmt(newLastStmt); lastStmt->SetNextStmt(newStmtList); newStmtList->SetPrevStmt(lastStmt); } else { bJump->bbStmtList = newStmtList; newStmtList->SetPrevStmt(newLastStmt); } // // Reverse the sense of the compare // gtReverseCond(condTree); // We need to update the following flags of the bJump block if they were set in the bDest block bJump->bbFlags |= (bDest->bbFlags & (BBF_HAS_NEWOBJ | BBF_HAS_NEWARRAY | BBF_HAS_NULLCHECK | BBF_HAS_IDX_LEN)); bJump->bbJumpKind = BBJ_COND; bJump->bbJumpDest = bDest->bbNext; /* Update bbRefs and bbPreds */ // bJump now falls through into the next block // fgAddRefPred(bJump->bbNext, bJump); // bJump no longer jumps to bDest // fgRemoveRefPred(bDest, bJump); // bJump now jumps to bDest->bbNext // fgAddRefPred(bDest->bbNext, bJump); if (weightJump > 0) { if (allProfileWeightsAreValid) { if (weightDest > weightJump) { bDest->bbWeight = (weightDest - weightJump); } else if (!bDest->isRunRarely()) { bDest->bbWeight = BB_UNITY_WEIGHT; } } else { weight_t newWeightDest = 0; if (weightDest > weightJump) { newWeightDest = (weightDest - weightJump); } if (weightDest >= (BB_LOOP_WEIGHT_SCALE * BB_UNITY_WEIGHT) / 2) { newWeightDest = (weightDest * 2) / (BB_LOOP_WEIGHT_SCALE * BB_UNITY_WEIGHT); } if (newWeightDest > 0) { bDest->bbWeight = newWeightDest; } } } #if DEBUG if (verbose) { // Dump out the newStmtList that we created printf("\nfgOptimizeBranch added these statements(s) at the end of " FMT_BB ":\n", bJump->bbNum); for (Statement* stmt : StatementList(newStmtList)) { gtDispStmt(stmt); } printf("\nfgOptimizeBranch changed block " FMT_BB " from BBJ_ALWAYS to BBJ_COND.\n", bJump->bbNum); printf("\nAfter this change in fgOptimizeBranch the BB graph is:"); fgDispBasicBlocks(verboseTrees); printf("\n"); } #endif // DEBUG return true; } //----------------------------------------------------------------------------- // fgOptimizeSwitchJump: see if a switch has a dominant case, and modify to // check for that case up front (aka switch peeling). // // Returns: // True if the switch now has an upstream check for the dominant case. // bool Compiler::fgOptimizeSwitchJumps() { if (!fgHasSwitch) { return false; } bool modified = false; for (BasicBlock* const block : Blocks()) { // Lowering expands switches, so calling this method on lowered IR // does not make sense. // assert(!block->IsLIR()); if (block->bbJumpKind != BBJ_SWITCH) { continue; } if (block->isRunRarely()) { continue; } if (!block->bbJumpSwt->bbsHasDominantCase) { continue; } // We currently will only see dominant cases with PGO. // assert(block->hasProfileWeight()); const unsigned dominantCase = block->bbJumpSwt->bbsDominantCase; JITDUMP(FMT_BB " has switch with dominant case %u, considering peeling\n", block->bbNum, dominantCase); // The dominant case should not be the default case, as we already peel that one. // assert(dominantCase < (block->bbJumpSwt->bbsCount - 1)); BasicBlock* const dominantTarget = block->bbJumpSwt->bbsDstTab[dominantCase]; Statement* const switchStmt = block->lastStmt(); GenTree* const switchTree = switchStmt->GetRootNode(); assert(switchTree->OperIs(GT_SWITCH)); GenTree* const switchValue = switchTree->AsOp()->gtGetOp1(); // Split the switch block just before at the switch. // // After this, newBlock is the switch block, and // block is the upstream block. 
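        // Illustrative result of the peel (the case number and weights below are hypothetical):
        //
        //   before:   block:    SWITCH(val)                    [weight 100, dominant case 3 taken ~80%]
        //
        //   after:    block:    JTRUE(EQ(val, 3)) -> dominantTarget     [edge weight ~80]
        //             newBlock: SWITCH(val)       -> all switch targets [weight ~20]
        //
        // The dominant case is left in the jump table; only the new up-front test and the
        // profile data below reflect the peel.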
// BasicBlock* newBlock = nullptr; if (block->firstStmt() == switchStmt) { newBlock = fgSplitBlockAtBeginning(block); } else { newBlock = fgSplitBlockAfterStatement(block, switchStmt->GetPrevStmt()); } // Set up a compare in the upstream block, "stealing" the switch value tree. // GenTree* const dominantCaseCompare = gtNewOperNode(GT_EQ, TYP_INT, switchValue, gtNewIconNode(dominantCase)); GenTree* const jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, dominantCaseCompare); Statement* const jmpStmt = fgNewStmtFromTree(jmpTree, switchStmt->GetDebugInfo()); fgInsertStmtAtEnd(block, jmpStmt); // Reattach switch value to the switch. This may introduce a comma // in the upstream compare tree, if the switch value expression is complex. // switchTree->AsOp()->gtOp1 = fgMakeMultiUse(&dominantCaseCompare->AsOp()->gtOp1); // Update flags // switchTree->gtFlags = switchTree->AsOp()->gtOp1->gtFlags; dominantCaseCompare->gtFlags |= dominantCaseCompare->AsOp()->gtOp1->gtFlags; jmpTree->gtFlags |= dominantCaseCompare->gtFlags; dominantCaseCompare->gtFlags |= GTF_RELOP_JMP_USED | GTF_DONT_CSE; // Wire up the new control flow. // block->bbJumpKind = BBJ_COND; block->bbJumpDest = dominantTarget; flowList* const blockToTargetEdge = fgAddRefPred(dominantTarget, block); flowList* const blockToNewBlockEdge = newBlock->bbPreds; assert(blockToNewBlockEdge->getBlock() == block); assert(blockToTargetEdge->getBlock() == block); // Update profile data // const weight_t fraction = newBlock->bbJumpSwt->bbsDominantFraction; const weight_t blockToTargetWeight = block->bbWeight * fraction; const weight_t blockToNewBlockWeight = block->bbWeight - blockToTargetWeight; newBlock->setBBProfileWeight(blockToNewBlockWeight); blockToTargetEdge->setEdgeWeights(blockToTargetWeight, blockToTargetWeight, dominantTarget); blockToNewBlockEdge->setEdgeWeights(blockToNewBlockWeight, blockToNewBlockWeight, block); // There may be other switch cases that lead to this same block, but there's just // one edge in the flowgraph. So we need to subtract off the profile data that now flows // along the peeled edge. // for (flowList* pred = dominantTarget->bbPreds; pred != nullptr; pred = pred->flNext) { if (pred->getBlock() == newBlock) { if (pred->flDupCount == 1) { // The only switch case leading to the dominant target was the one we peeled. // So the edge from the switch now has zero weight. // pred->setEdgeWeights(BB_ZERO_WEIGHT, BB_ZERO_WEIGHT, dominantTarget); } else { // Other switch cases also lead to the dominant target. // Subtract off the weight we transferred to the peel. // weight_t newMinWeight = pred->edgeWeightMin() - blockToTargetWeight; weight_t newMaxWeight = pred->edgeWeightMax() - blockToTargetWeight; if (newMinWeight < BB_ZERO_WEIGHT) { newMinWeight = BB_ZERO_WEIGHT; } if (newMaxWeight < BB_ZERO_WEIGHT) { newMaxWeight = BB_ZERO_WEIGHT; } pred->setEdgeWeights(newMinWeight, newMaxWeight, dominantTarget); } } } // For now we leave the switch as is, since there's no way // to indicate that one of the cases is now unreachable. // // But it no longer has a dominant case. // newBlock->bbJumpSwt->bbsHasDominantCase = false; modified = true; } return modified; } //----------------------------------------------------------------------------- // fgExpandRunRarelyBlocks: given the current set of run rarely blocks, // see if we can deduce that some other blocks are run rarely. // // Returns: // True if new block was marked as run rarely. 
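// Example (hypothetical flow): if BB03 ends in a BBJ_COND and both its fall-through
// successor and its jump target are already marked as rarely run, BB03 itself can be
// marked as rarely run; the walk below also propagates this property backwards to
// lexically earlier predecessors where possible.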
// bool Compiler::fgExpandRarelyRunBlocks() { bool result = false; #ifdef DEBUG if (verbose) { printf("\n*************** In fgExpandRarelyRunBlocks()\n"); } const char* reason = nullptr; #endif // Helper routine to figure out the lexically earliest predecessor // of bPrev that could become run rarely, given that bPrev // has just become run rarely. // // Note this is potentially expensive for large flow graphs and blocks // with lots of predecessors. // auto newRunRarely = [](BasicBlock* block, BasicBlock* bPrev) { // Figure out earliest block that might be impacted BasicBlock* bPrevPrev = nullptr; BasicBlock* tmpbb; if ((bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0) { // If we've got a BBJ_CALLFINALLY/BBJ_ALWAYS pair, treat the BBJ_CALLFINALLY as an // additional predecessor for the BBJ_ALWAYS block tmpbb = bPrev->bbPrev; noway_assert(tmpbb != nullptr); #if defined(FEATURE_EH_FUNCLETS) noway_assert(tmpbb->isBBCallAlwaysPair()); bPrevPrev = tmpbb; #else if (tmpbb->bbJumpKind == BBJ_CALLFINALLY) { bPrevPrev = tmpbb; } #endif } flowList* pred = bPrev->bbPreds; if (pred != nullptr) { // bPrevPrev will be set to the lexically // earliest predecessor of bPrev. while (pred != nullptr) { if (bPrevPrev == nullptr) { // Initially we select the first block in the bbPreds list bPrevPrev = pred->getBlock(); continue; } // Walk the flow graph lexically forward from pred->getBlock() // if we find (block == bPrevPrev) then // pred->getBlock() is an earlier predecessor. for (tmpbb = pred->getBlock(); tmpbb != nullptr; tmpbb = tmpbb->bbNext) { if (tmpbb == bPrevPrev) { /* We found an ealier predecessor */ bPrevPrev = pred->getBlock(); break; } else if (tmpbb == bPrev) { // We have reached bPrev so stop walking // as this cannot be an earlier predecessor break; } } // Onto the next predecessor pred = pred->flNext; } } if (bPrevPrev != nullptr) { // Walk the flow graph forward from bPrevPrev // if we don't find (tmpbb == bPrev) then our candidate // bPrevPrev is lexically after bPrev and we do not // want to select it as our new block for (tmpbb = bPrevPrev; tmpbb != nullptr; tmpbb = tmpbb->bbNext) { if (tmpbb == bPrev) { // Set up block back to the lexically // earliest predecessor of pPrev return bPrevPrev; } } } // No reason to backtrack // return (BasicBlock*)nullptr; }; // We expand the number of rarely run blocks by observing // that a block that falls into or jumps to a rarely run block, // must itself be rarely run and when we have a conditional // jump in which both branches go to rarely run blocks then // the block must itself be rarely run BasicBlock* block; BasicBlock* bPrev; for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext) { if (bPrev->isRunRarely()) { continue; } if (bPrev->hasProfileWeight()) { continue; } const char* reason = nullptr; switch (bPrev->bbJumpKind) { case BBJ_ALWAYS: if (bPrev->bbJumpDest->isRunRarely()) { reason = "Unconditional jump to a rarely run block"; } break; case BBJ_CALLFINALLY: if (bPrev->isBBCallAlwaysPair() && block->isRunRarely()) { reason = "Call of finally followed by a rarely run block"; } break; case BBJ_NONE: if (block->isRunRarely()) { reason = "Falling into a rarely run block"; } break; case BBJ_COND: if (block->isRunRarely() && bPrev->bbJumpDest->isRunRarely()) { reason = "Both sides of a conditional jump are rarely run"; } break; default: break; } if (reason != nullptr) { JITDUMP("%s, marking " FMT_BB " as rarely run\n", reason, bPrev->bbNum); // Must not have previously been marked 
noway_assert(!bPrev->isRunRarely()); // Mark bPrev as a new rarely run block bPrev->bbSetRunRarely(); // We have marked at least one block. // result = true; // See if we should to backtrack. // BasicBlock* bContinue = newRunRarely(block, bPrev); // If so, reset block to the backtrack point. // if (bContinue != nullptr) { block = bContinue; } } } // Now iterate over every block to see if we can prove that a block is rarely run // (i.e. when all predecessors to the block are rarely run) // for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext) { // If block is not run rarely, then check to make sure that it has // at least one non-rarely run block. if (!block->isRunRarely()) { bool rare = true; /* Make sure that block has at least one normal predecessor */ for (BasicBlock* const predBlock : block->PredBlocks()) { /* Find the fall through predecessor, if any */ if (!predBlock->isRunRarely()) { rare = false; break; } } if (rare) { // If 'block' is the start of a handler or filter then we cannot make it // rarely run because we may have an exceptional edge that // branches here. // if (bbIsHandlerBeg(block)) { rare = false; } } if (rare) { block->bbSetRunRarely(); result = true; #ifdef DEBUG if (verbose) { printf("All branches to " FMT_BB " are from rarely run blocks, marking as rarely run\n", block->bbNum); } #endif // DEBUG // When marking a BBJ_CALLFINALLY as rarely run we also mark // the BBJ_ALWAYS that comes after it as rarely run // if (block->isBBCallAlwaysPair()) { BasicBlock* bNext = block->bbNext; PREFIX_ASSUME(bNext != nullptr); bNext->bbSetRunRarely(); #ifdef DEBUG if (verbose) { printf("Also marking the BBJ_ALWAYS at " FMT_BB " as rarely run\n", bNext->bbNum); } #endif // DEBUG } } } /* COMPACT blocks if possible */ if (bPrev->bbJumpKind == BBJ_NONE) { if (fgCanCompactBlocks(bPrev, block)) { fgCompactBlocks(bPrev, block); block = bPrev; continue; } } // // if bPrev->bbWeight is not based upon profile data we can adjust // the weights of bPrev and block // else if (bPrev->isBBCallAlwaysPair() && // we must have a BBJ_CALLFINALLY and BBK_ALWAYS pair (bPrev->bbWeight != block->bbWeight) && // the weights are currently different !bPrev->hasProfileWeight()) // and the BBJ_CALLFINALLY block is not using profiled // weights { if (block->isRunRarely()) { bPrev->bbWeight = block->bbWeight; // the BBJ_CALLFINALLY block now has the same weight as the BBJ_ALWAYS block bPrev->bbFlags |= BBF_RUN_RARELY; // and is now rarely run #ifdef DEBUG if (verbose) { printf("Marking the BBJ_CALLFINALLY block at " FMT_BB " as rarely run because " FMT_BB " is rarely run\n", bPrev->bbNum, block->bbNum); } #endif // DEBUG } else if (bPrev->isRunRarely()) { block->bbWeight = bPrev->bbWeight; // the BBJ_ALWAYS block now has the same weight as the BBJ_CALLFINALLY block block->bbFlags |= BBF_RUN_RARELY; // and is now rarely run #ifdef DEBUG if (verbose) { printf("Marking the BBJ_ALWAYS block at " FMT_BB " as rarely run because " FMT_BB " is rarely run\n", block->bbNum, bPrev->bbNum); } #endif // DEBUG } else // Both blocks are hot, bPrev is known not to be using profiled weight { bPrev->bbWeight = block->bbWeight; // the BBJ_CALLFINALLY block now has the same weight as the BBJ_ALWAYS block } noway_assert(block->bbWeight == bPrev->bbWeight); } } return result; } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif 
//----------------------------------------------------------------------------- // fgReorderBlocks: reorder blocks to favor frequent fall through paths, // move rare blocks to the end of the method/eh region, and move // funclets to the ends of methods. // // Returns: // True if anything got reordered. Reordering blocks may require changing // IR to reverse branch conditions. // bool Compiler::fgReorderBlocks() { noway_assert(opts.compDbgCode == false); #if defined(FEATURE_EH_FUNCLETS) assert(fgFuncletsCreated); #endif // FEATURE_EH_FUNCLETS // We can't relocate anything if we only have one block if (fgFirstBB->bbNext == nullptr) { return false; } bool newRarelyRun = false; bool movedBlocks = false; bool optimizedSwitches = false; bool optimizedBranches = false; // First let us expand the set of run rarely blocks newRarelyRun |= fgExpandRarelyRunBlocks(); #if !defined(FEATURE_EH_FUNCLETS) movedBlocks |= fgRelocateEHRegions(); #endif // !FEATURE_EH_FUNCLETS // // If we are using profile weights we can change some // switch jumps into conditional test and jump // if (fgIsUsingProfileWeights()) { // // Note that this is currently not yet implemented // optimizedSwitches = fgOptimizeSwitchJumps(); if (optimizedSwitches) { fgUpdateFlowGraph(); } } #ifdef DEBUG if (verbose) { printf("*************** In fgReorderBlocks()\n"); printf("\nInitial BasicBlocks"); fgDispBasicBlocks(verboseTrees); printf("\n"); } #endif // DEBUG BasicBlock* bNext; BasicBlock* bPrev; BasicBlock* block; unsigned XTnum; EHblkDsc* HBtab; // Iterate over every block, remembering our previous block in bPrev for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext) { // // Consider relocating the rarely run blocks such that they are at the end of the method. // We also consider reversing conditional branches so that they become a not taken forwards branch. // // If block is marked with a BBF_KEEP_BBJ_ALWAYS flag then we don't move the block if ((block->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0) { continue; } // Finally and handlers blocks are to be kept contiguous. // TODO-CQ: Allow reordering within the handler region if (block->hasHndIndex() == true) { continue; } bool reorderBlock = true; // This is set to false if we decide not to reorder 'block' bool isRare = block->isRunRarely(); BasicBlock* bDest = nullptr; bool forwardBranch = false; bool backwardBranch = false; // Setup bDest if (bPrev->KindIs(BBJ_COND, BBJ_ALWAYS)) { bDest = bPrev->bbJumpDest; forwardBranch = fgIsForwardBranch(bPrev); backwardBranch = !forwardBranch; } // We will look for bPrev as a non rarely run block followed by block as a rarely run block // if (bPrev->isRunRarely()) { reorderBlock = false; } // If the weights of the bPrev, block and bDest were all obtained from a profile run // then we can use them to decide if it is useful to reverse this conditional branch weight_t profHotWeight = -1; if (bPrev->hasProfileWeight() && block->hasProfileWeight() && ((bDest == nullptr) || bDest->hasProfileWeight())) { // // All blocks have profile information // if (forwardBranch) { if (bPrev->bbJumpKind == BBJ_ALWAYS) { // We can pull up the blocks that the unconditional jump branches to // if the weight of bDest is greater or equal to the weight of block // also the weight of bDest can't be zero. 
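                    // For example (hypothetical weights): if bPrev jumps to bDest and
                    // block->bbWeight is 10, then a bDest->bbWeight of 30 makes bDest a
                    // candidate to be pulled up behind bPrev, while a bDest->bbWeight
                    // of 5 (or 0) does not.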
// if ((bDest->bbWeight < block->bbWeight) || (bDest->bbWeight == BB_ZERO_WEIGHT)) { reorderBlock = false; } else { // // If this remains true then we will try to pull up bDest to succeed bPrev // bool moveDestUp = true; if (fgHaveValidEdgeWeights) { // // The edge bPrev -> bDest must have a higher minimum weight // than every other edge into bDest // flowList* edgeFromPrev = fgGetPredForBlock(bDest, bPrev); noway_assert(edgeFromPrev != nullptr); // Examine all of the other edges into bDest for (flowList* const edge : bDest->PredEdges()) { if (edge != edgeFromPrev) { if (edge->edgeWeightMax() >= edgeFromPrev->edgeWeightMin()) { moveDestUp = false; break; } } } } else { // // The block bPrev must have a higher weight // than every other block that goes into bDest // // Examine all of the other edges into bDest for (BasicBlock* const predBlock : bDest->PredBlocks()) { if ((predBlock != bPrev) && (predBlock->bbWeight >= bPrev->bbWeight)) { moveDestUp = false; break; } } } // Are we still good to move bDest up to bPrev? if (moveDestUp) { // // We will consider all blocks that have less weight than profHotWeight to be // uncommonly run blocks as compared with the hot path of bPrev taken-jump to bDest // profHotWeight = bDest->bbWeight - 1; } else { if (block->isRunRarely()) { // We will move any rarely run blocks blocks profHotWeight = 0; } else { // We will move all blocks that have a weight less or equal to our fall through block profHotWeight = block->bbWeight + 1; } // But we won't try to connect with bDest bDest = nullptr; } } } else // (bPrev->bbJumpKind == BBJ_COND) { noway_assert(bPrev->bbJumpKind == BBJ_COND); // // We will reverse branch if the taken-jump to bDest ratio (i.e. 'takenRatio') // is more than 51% // // We will setup profHotWeight to be maximum bbWeight that a block // could have for us not to want to reverse the conditional branch // // We will consider all blocks that have less weight than profHotWeight to be // uncommonly run blocks as compared with the hot path of bPrev taken-jump to bDest // if (fgHaveValidEdgeWeights) { // We have valid edge weights, however even with valid edge weights // we may have a minimum and maximum range for each edges value // // We will check that the min weight of the bPrev to bDest edge // is more than twice the max weight of the bPrev to block edge. // // bPrev --> [BB04, weight 31] // | \. // edgeToBlock -------------> O \. // [min=8,max=10] V \. // block --> [BB05, weight 10] \. // \. 
                        //                  edgeToDest ----------------------------> O
                        //                  [min=21,max=23]                          |
                        //                                                           V
                        //  bDest --------------->   [BB08, weight 21]
                        //
                        flowList* edgeToDest  = fgGetPredForBlock(bDest, bPrev);
                        flowList* edgeToBlock = fgGetPredForBlock(block, bPrev);
                        noway_assert(edgeToDest != nullptr);
                        noway_assert(edgeToBlock != nullptr);

                        //
                        // Calculate the taken ratio
                        //   A takenRatio of 0.10 means taken 10% of the time, not taken 90% of the time
                        //   A takenRatio of 0.50 means taken 50% of the time, not taken 50% of the time
                        //   A takenRatio of 0.90 means taken 90% of the time, not taken 10% of the time
                        //
                        double takenCount =
                            ((double)edgeToDest->edgeWeightMin() + (double)edgeToDest->edgeWeightMax()) / 2.0;
                        double notTakenCount =
                            ((double)edgeToBlock->edgeWeightMin() + (double)edgeToBlock->edgeWeightMax()) / 2.0;
                        double totalCount = takenCount + notTakenCount;
                        double takenRatio = takenCount / totalCount;

                        // If the takenRatio is greater than or equal to 51% then we will reverse the branch
                        if (takenRatio < 0.51)
                        {
                            reorderBlock = false;
                        }
                        else
                        {
                            // set profHotWeight
                            profHotWeight = (edgeToBlock->edgeWeightMin() + edgeToBlock->edgeWeightMax()) / 2 - 1;
                        }
                    }
                    else
                    {
                        // We don't have valid edge weights, so we will be more conservative.
                        // We could have bPrev, block or bDest as part of a loop and thus have extra weight
                        //
                        // We will do two checks:
                        //   1. Check that the weight of bDest is at least two times more than block
                        //   2. Check that the weight of bPrev is at least three times more than block
                        //
                        //                  bPrev -->   [BB04, weight 31]
                        //                                     |         \.
                        //                                     V          \.
                        //                  block -->   [BB05, weight 10]  \.
                        //                                                  \.
                        //                                                   |
                        //                                                   V
                        //                  bDest --------------->   [BB08, weight 21]
                        //
                        //  For this case weightDest is calculated as (21+1)/2  or 11
                        //            and weightPrev is calculated as (31+2)/3  also 11
                        //
                        //  Generally both weightDest and weightPrev should calculate
                        //  the same value unless bPrev or bDest are part of a loop
                        //
                        weight_t weightDest = bDest->isMaxBBWeight() ? bDest->bbWeight : (bDest->bbWeight + 1) / 2;
                        weight_t weightPrev = bPrev->isMaxBBWeight() ? bPrev->bbWeight : (bPrev->bbWeight + 2) / 3;

                        // select the lower of weightDest and weightPrev
                        profHotWeight = (weightDest < weightPrev) ? weightDest : weightPrev;

                        // if the weight of block is greater than (or equal to) profHotWeight then we don't reverse
                        // the cond
                        if (block->bbWeight >= profHotWeight)
                        {
                            reorderBlock = false;
                        }
                    }
                }
            }
            else // not a forwardBranch
            {
                if (bPrev->bbFallsThrough())
                {
                    goto CHECK_FOR_RARE;
                }

                // Here we should pull up the highest weight block remaining
                // and place it here since bPrev does not fall through.
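                // For example (hypothetical weights): if the blocks after bPrev are
                //    BB10 [weight 5], BB11 [weight 40], BB12 [weight 7]  (each falling into the next)
                // the scan below treats BB11 as the heaviest candidate and selects the run of
                // fall-through blocks leading into it (starting at BB10 here), so that BB11 is
                // brought up behind bPrev together with the blocks that fall into it.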
weight_t highestWeight = 0; BasicBlock* candidateBlock = nullptr; BasicBlock* lastNonFallThroughBlock = bPrev; BasicBlock* bTmp = bPrev->bbNext; while (bTmp != nullptr) { // Don't try to split a Call/Always pair // if (bTmp->isBBCallAlwaysPair()) { // Move bTmp forward bTmp = bTmp->bbNext; } // // Check for loop exit condition // if (bTmp == nullptr) { break; } // // if its weight is the highest one we've seen and // the EH regions allow for us to place bTmp after bPrev // if ((bTmp->bbWeight > highestWeight) && fgEhAllowsMoveBlock(bPrev, bTmp)) { // When we have a current candidateBlock that is a conditional (or unconditional) jump // to bTmp (which is a higher weighted block) then it is better to keep out current // candidateBlock and have it fall into bTmp // if ((candidateBlock == nullptr) || !candidateBlock->KindIs(BBJ_COND, BBJ_ALWAYS) || (candidateBlock->bbJumpDest != bTmp)) { // otherwise we have a new candidateBlock // highestWeight = bTmp->bbWeight; candidateBlock = lastNonFallThroughBlock->bbNext; } } if ((bTmp->bbFallsThrough() == false) || (bTmp->bbWeight == BB_ZERO_WEIGHT)) { lastNonFallThroughBlock = bTmp; } bTmp = bTmp->bbNext; } // If we didn't find a suitable block then skip this if (highestWeight == 0) { reorderBlock = false; } else { noway_assert(candidateBlock != nullptr); // If the candidateBlock is the same a block then skip this if (candidateBlock == block) { reorderBlock = false; } else { // Set bDest to the block that we want to come after bPrev bDest = candidateBlock; // set profHotWeight profHotWeight = highestWeight - 1; } } } } else // we don't have good profile info (or we are falling through) { CHECK_FOR_RARE:; /* We only want to reorder when we have a rarely run */ /* block right after a normal block, */ /* (bPrev is known to be a normal block at this point) */ if (!isRare) { if ((bDest == block->bbNext) && (block->bbJumpKind == BBJ_RETURN) && (bPrev->bbJumpKind == BBJ_ALWAYS)) { // This is a common case with expressions like "return Expr1 && Expr2" -- move the return // to establish fall-through. } else { reorderBlock = false; } } else { /* If the jump target bDest is also a rarely run block then we don't want to do the reversal */ if (bDest && bDest->isRunRarely()) { reorderBlock = false; /* Both block and bDest are rarely run */ } else { // We will move any rarely run blocks blocks profHotWeight = 0; } } } if (reorderBlock == false) { // // Check for an unconditional branch to a conditional branch // which also branches back to our next block // const bool optimizedBranch = fgOptimizeBranch(bPrev); if (optimizedBranch) { noway_assert(bPrev->bbJumpKind == BBJ_COND); optimizedBranches = true; } continue; } // Now we need to determine which blocks should be moved // // We consider one of two choices: // // 1. Moving the fall-through blocks (or rarely run blocks) down to // later in the method and hopefully connecting the jump dest block // so that it becomes the fall through block // // And when bDest in not NULL, we also consider: // // 2. 
Moving the bDest block (or blocks) up to bPrev // so that it could be used as a fall through block // // We will prefer option #1 if we are able to connect the jump dest // block as the fall though block otherwise will we try to use option #2 // // // Consider option #1: relocating blocks starting at 'block' // to later in flowgraph // // We set bStart to the first block that will be relocated // and bEnd to the last block that will be relocated BasicBlock* bStart = block; BasicBlock* bEnd = bStart; bNext = bEnd->bbNext; bool connected_bDest = false; if ((backwardBranch && !isRare) || ((block->bbFlags & BBF_DONT_REMOVE) != 0)) // Don't choose option #1 when block is the start of a try region { bStart = nullptr; bEnd = nullptr; } else { while (true) { // Don't try to split a Call/Always pair // if (bEnd->isBBCallAlwaysPair()) { // Move bEnd and bNext forward bEnd = bNext; bNext = bNext->bbNext; } // // Check for loop exit condition // if (bNext == nullptr) { break; } #if defined(FEATURE_EH_FUNCLETS) // Check if we've reached the funclets region, at the end of the function if (fgFirstFuncletBB == bEnd->bbNext) { break; } #endif // FEATURE_EH_FUNCLETS if (bNext == bDest) { connected_bDest = true; break; } // All the blocks must have the same try index // and must not have the BBF_DONT_REMOVE flag set if (!BasicBlock::sameTryRegion(bStart, bNext) || ((bNext->bbFlags & BBF_DONT_REMOVE) != 0)) { // exit the loop, bEnd is now set to the // last block that we want to relocate break; } // If we are relocating rarely run blocks.. if (isRare) { // ... then all blocks must be rarely run if (!bNext->isRunRarely()) { // exit the loop, bEnd is now set to the // last block that we want to relocate break; } } else { // If we are moving blocks that are hot then all // of the blocks moved must be less than profHotWeight */ if (bNext->bbWeight >= profHotWeight) { // exit the loop, bEnd is now set to the // last block that we would relocate break; } } // Move bEnd and bNext forward bEnd = bNext; bNext = bNext->bbNext; } // Set connected_bDest to true if moving blocks [bStart .. bEnd] // connects with the the jump dest of bPrev (i.e bDest) and // thus allows bPrev fall through instead of jump. 
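        // Illustrative layout for option #1 (hypothetical ordering): if the blocks are laid out as
        //
        //    bPrev (jumps to bDest), [bStart .. bEnd], bDest, ...
        //
        // then moving [bStart .. bEnd] later in the method leaves bPrev lexically adjacent
        // to bDest, so the jump can become a fall-through; this is the "connected" case
        // tested just below (bNext == bDest).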
if (bNext == bDest) { connected_bDest = true; } } // Now consider option #2: Moving the jump dest block (or blocks) // up to bPrev // // The variables bStart2, bEnd2 and bPrev2 are used for option #2 // // We will setup bStart2 to the first block that will be relocated // and bEnd2 to the last block that will be relocated // and bPrev2 to be the lexical pred of bDest // // If after this calculation bStart2 is NULL we cannot use option #2, // otherwise bStart2, bEnd2 and bPrev2 are all non-NULL and we will use option #2 BasicBlock* bStart2 = nullptr; BasicBlock* bEnd2 = nullptr; BasicBlock* bPrev2 = nullptr; // If option #1 didn't connect bDest and bDest isn't NULL if ((connected_bDest == false) && (bDest != nullptr) && // The jump target cannot be moved if it has the BBF_DONT_REMOVE flag set ((bDest->bbFlags & BBF_DONT_REMOVE) == 0)) { // We will consider option #2: relocating blocks starting at 'bDest' to succeed bPrev // // setup bPrev2 to be the lexical pred of bDest bPrev2 = block; while (bPrev2 != nullptr) { if (bPrev2->bbNext == bDest) { break; } bPrev2 = bPrev2->bbNext; } if ((bPrev2 != nullptr) && fgEhAllowsMoveBlock(bPrev, bDest)) { // We have decided that relocating bDest to be after bPrev is best // Set bStart2 to the first block that will be relocated // and bEnd2 to the last block that will be relocated // // Assigning to bStart2 selects option #2 // bStart2 = bDest; bEnd2 = bStart2; bNext = bEnd2->bbNext; while (true) { // Don't try to split a Call/Always pair // if (bEnd2->isBBCallAlwaysPair()) { noway_assert(bNext->bbJumpKind == BBJ_ALWAYS); // Move bEnd2 and bNext forward bEnd2 = bNext; bNext = bNext->bbNext; } // Check for the Loop exit conditions if (bNext == nullptr) { break; } if (bEnd2->bbFallsThrough() == false) { break; } // If we are relocating rarely run blocks.. // All the blocks must have the same try index, // and must not have the BBF_DONT_REMOVE flag set if (!BasicBlock::sameTryRegion(bStart2, bNext) || ((bNext->bbFlags & BBF_DONT_REMOVE) != 0)) { // exit the loop, bEnd2 is now set to the // last block that we want to relocate break; } if (isRare) { /* ... then all blocks must not be rarely run */ if (bNext->isRunRarely()) { // exit the loop, bEnd2 is now set to the // last block that we want to relocate break; } } else { // If we are relocating hot blocks // all blocks moved must be greater than profHotWeight if (bNext->bbWeight <= profHotWeight) { // exit the loop, bEnd2 is now set to the // last block that we want to relocate break; } } // Move bEnd2 and bNext forward bEnd2 = bNext; bNext = bNext->bbNext; } } } // If we are using option #1 then ... if (bStart2 == nullptr) { // Don't use option #1 for a backwards branch if (bStart == nullptr) { continue; } // .... 
Don't move a set of blocks that are already at the end of the main method if (bEnd == fgLastBBInMainFunction()) { continue; } } #ifdef DEBUG if (verbose) { if (bDest != nullptr) { if (bPrev->bbJumpKind == BBJ_COND) { printf("Decided to reverse conditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); } else if (bPrev->bbJumpKind == BBJ_ALWAYS) { printf("Decided to straighten unconditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); } else { printf("Decided to place hot code after " FMT_BB ", placed " FMT_BB " after this block ", bPrev->bbNum, bDest->bbNum); } if (profHotWeight > 0) { printf("because of IBC profile data\n"); } else { if (bPrev->bbFallsThrough()) { printf("since it falls into a rarely run block\n"); } else { printf("since it is succeeded by a rarely run block\n"); } } } else { printf("Decided to relocate block(s) after block " FMT_BB " since they are %s block(s)\n", bPrev->bbNum, block->isRunRarely() ? "rarely run" : "uncommonly run"); } } #endif // DEBUG // We will set insertAfterBlk to the block the precedes our insertion range // We will set bStartPrev to be the block that precedes the set of blocks that we are moving BasicBlock* insertAfterBlk; BasicBlock* bStartPrev; if (bStart2 != nullptr) { // Option #2: relocating blocks starting at 'bDest' to follow bPrev // Update bStart and bEnd so that we can use these two for all later operations bStart = bStart2; bEnd = bEnd2; // Set bStartPrev to be the block that comes before bStart bStartPrev = bPrev2; // We will move [bStart..bEnd] to immediately after bPrev insertAfterBlk = bPrev; } else { // option #1: Moving the fall-through blocks (or rarely run blocks) down to later in the method // Set bStartPrev to be the block that come before bStart bStartPrev = bPrev; // We will move [bStart..bEnd] but we will pick the insert location later insertAfterBlk = nullptr; } // We are going to move [bStart..bEnd] so they can't be NULL noway_assert(bStart != nullptr); noway_assert(bEnd != nullptr); // bEnd can't be a BBJ_CALLFINALLY unless it is a RETLESS call noway_assert((bEnd->bbJumpKind != BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); // bStartPrev must be set to the block that precedes bStart noway_assert(bStartPrev->bbNext == bStart); // Since we will be unlinking [bStart..bEnd], // we need to compute and remember if bStart is in each of // the try and handler regions // bool* fStartIsInTry = nullptr; bool* fStartIsInHnd = nullptr; if (compHndBBtabCount > 0) { fStartIsInTry = new (this, CMK_Unknown) bool[compHndBBtabCount]; fStartIsInHnd = new (this, CMK_Unknown) bool[compHndBBtabCount]; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { fStartIsInTry[XTnum] = HBtab->InTryRegionBBRange(bStart); fStartIsInHnd[XTnum] = HBtab->InHndRegionBBRange(bStart); } } /* Temporarily unlink [bStart..bEnd] from the flow graph */ fgUnlinkRange(bStart, bEnd); if (insertAfterBlk == nullptr) { // Find new location for the unlinked block(s) // Set insertAfterBlk to the block which will precede the insertion point if (!bStart->hasTryIndex() && isRare) { // We'll just insert the blocks at the end of the method. If the method // has funclets, we will insert at the end of the main method but before // any of the funclets. Note that we create funclets before we call // fgReorderBlocks(). 
insertAfterBlk = fgLastBBInMainFunction(); noway_assert(insertAfterBlk != bPrev); } else { BasicBlock* startBlk; BasicBlock* lastBlk; EHblkDsc* ehDsc = ehInitTryBlockRange(bStart, &startBlk, &lastBlk); BasicBlock* endBlk; /* Setup startBlk and endBlk as the range to search */ if (ehDsc != nullptr) { endBlk = lastBlk->bbNext; /* Multiple (nested) try regions might start from the same BB. For example, try3 try2 try1 |--- |--- |--- BB01 | | | BB02 | | |--- BB03 | | BB04 | |------------ BB05 | BB06 |------------------- BB07 Now if we want to insert in try2 region, we will start with startBlk=BB01. The following loop will allow us to start from startBlk==BB04. */ while (!BasicBlock::sameTryRegion(startBlk, bStart) && (startBlk != endBlk)) { startBlk = startBlk->bbNext; } // startBlk cannot equal endBlk as it must come before endBlk if (startBlk == endBlk) { goto CANNOT_MOVE; } // we also can't start searching the try region at bStart if (startBlk == bStart) { // if bEnd is the last block in the method or // or if bEnd->bbNext is in a different try region // then we cannot move the blocks // if ((bEnd->bbNext == nullptr) || !BasicBlock::sameTryRegion(startBlk, bEnd->bbNext)) { goto CANNOT_MOVE; } startBlk = bEnd->bbNext; // Check that the new startBlk still comes before endBlk // startBlk cannot equal endBlk as it must come before endBlk if (startBlk == endBlk) { goto CANNOT_MOVE; } BasicBlock* tmpBlk = startBlk; while ((tmpBlk != endBlk) && (tmpBlk != nullptr)) { tmpBlk = tmpBlk->bbNext; } // when tmpBlk is NULL that means startBlk is after endBlk // so there is no way to move bStart..bEnd within the try region if (tmpBlk == nullptr) { goto CANNOT_MOVE; } } } else { noway_assert(isRare == false); /* We'll search through the entire main method */ startBlk = fgFirstBB; endBlk = fgEndBBAfterMainFunction(); } // Calculate nearBlk and jumpBlk and then call fgFindInsertPoint() // to find our insertion block // { // If the set of blocks that we are moving ends with a BBJ_ALWAYS to // another [rarely run] block that comes after bPrev (forward branch) // then we can set up nearBlk to eliminate this jump sometimes // BasicBlock* nearBlk = nullptr; BasicBlock* jumpBlk = nullptr; if ((bEnd->bbJumpKind == BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && fgIsForwardBranch(bEnd, bPrev)) { // Set nearBlk to be the block in [startBlk..endBlk] // such that nearBlk->bbNext == bEnd->JumpDest // if no such block exists then set nearBlk to NULL nearBlk = startBlk; jumpBlk = bEnd; do { // We do not want to set nearBlk to bPrev // since then we will not move [bStart..bEnd] // if (nearBlk != bPrev) { // Check if nearBlk satisfies our requirement if (nearBlk->bbNext == bEnd->bbJumpDest) { break; } } // Did we reach the endBlk? if (nearBlk == endBlk) { nearBlk = nullptr; break; } // advance nearBlk to the next block nearBlk = nearBlk->bbNext; } while (nearBlk != nullptr); } // if nearBlk is NULL then we set nearBlk to be the // first block that we want to insert after. if (nearBlk == nullptr) { if (bDest != nullptr) { // we want to insert after bDest nearBlk = bDest; } else { // we want to insert after bPrev nearBlk = bPrev; } } /* Set insertAfterBlk to the block which we will insert after. */ insertAfterBlk = fgFindInsertPoint(bStart->bbTryIndex, true, // Insert in the try region. 
startBlk, endBlk, nearBlk, jumpBlk, bStart->bbWeight == BB_ZERO_WEIGHT); } /* See if insertAfterBlk is the same as where we started, */ /* or if we could not find any insertion point */ if ((insertAfterBlk == bPrev) || (insertAfterBlk == nullptr)) { CANNOT_MOVE:; /* We couldn't move the blocks, so put everything back */ /* relink [bStart .. bEnd] into the flow graph */ bPrev->setNext(bStart); if (bEnd->bbNext) { bEnd->bbNext->bbPrev = bEnd; } #ifdef DEBUG if (verbose) { if (bStart != bEnd) { printf("Could not relocate blocks (" FMT_BB " .. " FMT_BB ")\n", bStart->bbNum, bEnd->bbNum); } else { printf("Could not relocate block " FMT_BB "\n", bStart->bbNum); } } #endif // DEBUG continue; } } } noway_assert(insertAfterBlk != nullptr); noway_assert(bStartPrev != nullptr); noway_assert(bStartPrev != insertAfterBlk); #ifdef DEBUG movedBlocks = true; if (verbose) { const char* msg; if (bStart2 != nullptr) { msg = "hot"; } else { if (isRare) { msg = "rarely run"; } else { msg = "uncommon"; } } printf("Relocated %s ", msg); if (bStart != bEnd) { printf("blocks (" FMT_BB " .. " FMT_BB ")", bStart->bbNum, bEnd->bbNum); } else { printf("block " FMT_BB, bStart->bbNum); } if (bPrev->bbJumpKind == BBJ_COND) { printf(" by reversing conditional jump at " FMT_BB "\n", bPrev->bbNum); } else { printf("\n", bPrev->bbNum); } } #endif // DEBUG if (bPrev->bbJumpKind == BBJ_COND) { /* Reverse the bPrev jump condition */ Statement* condTestStmt = bPrev->lastStmt(); GenTree* condTest = condTestStmt->GetRootNode(); noway_assert(condTest->gtOper == GT_JTRUE); condTest->AsOp()->gtOp1 = gtReverseCond(condTest->AsOp()->gtOp1); if (bStart2 == nullptr) { /* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */ bPrev->bbJumpDest = bStart; } else { noway_assert(insertAfterBlk == bPrev); noway_assert(insertAfterBlk->bbNext == block); /* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */ bPrev->bbJumpDest = block; } } // If we are moving blocks that are at the end of a try or handler // we will need to shorten ebdTryLast or ebdHndLast // ehUpdateLastBlocks(bEnd, bStartPrev); // If we are moving blocks into the end of a try region or handler region // we will need to extend ebdTryLast or ebdHndLast so the blocks that we // are moving are part of this try or handler region. // for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Are we moving blocks to the end of a try region? if (HBtab->ebdTryLast == insertAfterBlk) { if (fStartIsInTry[XTnum]) { // bStart..bEnd is in the try, so extend the try region fgSetTryEnd(HBtab, bEnd); } } // Are we moving blocks to the end of a handler region? 
if (HBtab->ebdHndLast == insertAfterBlk) { if (fStartIsInHnd[XTnum]) { // bStart..bEnd is in the handler, so extend the handler region fgSetHndEnd(HBtab, bEnd); } } } /* We have decided to insert the block(s) after 'insertAfterBlk' */ fgMoveBlocksAfter(bStart, bEnd, insertAfterBlk); if (bDest) { /* We may need to insert an unconditional branch after bPrev to bDest */ fgConnectFallThrough(bPrev, bDest); } else { /* If bPrev falls through, we must insert a jump to block */ fgConnectFallThrough(bPrev, block); } BasicBlock* bSkip = bEnd->bbNext; /* If bEnd falls through, we must insert a jump to bNext */ fgConnectFallThrough(bEnd, bNext); if (bStart2 == nullptr) { /* If insertAfterBlk falls through, we are forced to */ /* add a jump around the block(s) we just inserted */ fgConnectFallThrough(insertAfterBlk, bSkip); } else { /* We may need to insert an unconditional branch after bPrev2 to bStart */ fgConnectFallThrough(bPrev2, bStart); } #if DEBUG if (verbose) { printf("\nAfter this change in fgReorderBlocks the BB graph is:"); fgDispBasicBlocks(verboseTrees); printf("\n"); } fgVerifyHandlerTab(); // Make sure that the predecessor lists are accurate if (expensiveDebugCheckLevel >= 2) { fgDebugCheckBBlist(); } #endif // DEBUG // Set our iteration point 'block' to be the new bPrev->bbNext // It will be used as the next bPrev block = bPrev->bbNext; } // end of for loop(bPrev,block) const bool changed = movedBlocks || newRarelyRun || optimizedSwitches || optimizedBranches; if (changed) { fgNeedsUpdateFlowGraph = true; #if DEBUG // Make sure that the predecessor lists are accurate if (expensiveDebugCheckLevel >= 2) { fgDebugCheckBBlist(); } #endif // DEBUG } return changed; } #ifdef _PREFAST_ #pragma warning(pop) #endif //------------------------------------------------------------- // fgUpdateFlowGraph: Removes any empty blocks, unreachable blocks, and redundant jumps. // Most of those appear after dead store removal and folding of conditionals. // Also, compact consecutive basic blocks. // // Arguments: // doTailDuplication - true to attempt tail duplication optimization // // Returns: true if the flowgraph has been modified // // Notes: // Debuggable code and Min Optimization JIT also introduces basic blocks // but we do not optimize those! // bool Compiler::fgUpdateFlowGraph(bool doTailDuplication) { #ifdef DEBUG if (verbose) { printf("\n*************** In fgUpdateFlowGraph()"); } #endif // DEBUG /* This should never be called for debuggable code */ noway_assert(opts.OptimizationEnabled()); #ifdef DEBUG if (verbose) { printf("\nBefore updating the flow graph:\n"); fgDispBasicBlocks(verboseTrees); printf("\n"); } #endif // DEBUG /* Walk all the basic blocks - look for unconditional jumps, empty blocks, blocks to compact, etc... * * OBSERVATION: * Once a block is removed the predecessors are not accurate (assuming they were at the beginning) * For now we will only use the information in bbRefs because it is easier to be updated */ bool modified = false; bool change; do { change = false; BasicBlock* block; // the current block BasicBlock* bPrev = nullptr; // the previous non-worthless block BasicBlock* bNext; // the successor of the current block BasicBlock* bDest; // the jump target of the current block for (block = fgFirstBB; block != nullptr; block = block->bbNext) { /* Some blocks may be already marked removed by other optimizations * (e.g worthless loop removal), without being explicitly removed * from the list. 
*/ if (block->bbFlags & BBF_REMOVED) { if (bPrev) { bPrev->setNext(block->bbNext); } else { /* WEIRD first basic block is removed - should have an assert here */ noway_assert(!"First basic block marked as BBF_REMOVED???"); fgFirstBB = block->bbNext; } continue; } /* We jump to the REPEAT label if we performed a change involving the current block * This is in case there are other optimizations that can show up * (e.g. - compact 3 blocks in a row) * If nothing happens, we then finish the iteration and move to the next block */ REPEAT:; bNext = block->bbNext; bDest = nullptr; if (block->bbJumpKind == BBJ_ALWAYS) { bDest = block->bbJumpDest; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, bDest)) { change = true; modified = true; bDest = block->bbJumpDest; bNext = block->bbNext; } } if (block->bbJumpKind == BBJ_NONE) { bDest = nullptr; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->bbNext)) { change = true; modified = true; bDest = block->bbJumpDest; bNext = block->bbNext; } } // Remove JUMPS to the following block // and optimize any JUMPS to JUMPS if (block->KindIs(BBJ_COND, BBJ_ALWAYS)) { bDest = block->bbJumpDest; if (bDest == bNext) { if (fgOptimizeBranchToNext(block, bNext, bPrev)) { change = true; modified = true; bDest = nullptr; } } } if (bDest != nullptr) { // Do we have a JUMP to an empty unconditional JUMP block? if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { if (fgOptimizeBranchToEmptyUnconditional(block, bDest)) { change = true; modified = true; goto REPEAT; } } // Check for cases where reversing the branch condition may enable // other flow opts. // // Current block falls through to an empty bNext BBJ_ALWAYS, and // (a) block jump target is bNext's bbNext. // (b) block jump target is elsewhere but join free, and // bNext's jump target has a join. // if ((block->bbJumpKind == BBJ_COND) && // block is a BBJ_COND block (bNext != nullptr) && // block is not the last block (bNext->bbRefs == 1) && // No other block jumps to bNext (bNext->bbJumpKind == BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block bNext->isEmpty() && // and it is an an empty block (bNext != bNext->bbJumpDest) && // special case for self jumps (bDest != fgFirstColdBlock)) { // case (a) // const bool isJumpAroundEmpty = (bNext->bbNext == bDest); // case (b) // // Note the asymetric checks for refs == 1 and refs > 1 ensures that we // differentiate the roles played by bDest and bNextJumpDest. We need some // sense of which arrangement is preferable to avoid getting stuck in a loop // reversing and re-reversing. // // Other tiebreaking criteria could be considered. // // Pragmatic constraints: // // * don't consider lexical predecessors, or we may confuse loop recognition // * don't consider blocks of different rarities // BasicBlock* const bNextJumpDest = bNext->bbJumpDest; const bool isJumpToJoinFree = !isJumpAroundEmpty && (bDest->bbRefs == 1) && (bNextJumpDest->bbRefs > 1) && (bDest->bbNum > block->bbNum) && (block->isRunRarely() == bDest->isRunRarely()); bool optimizeJump = isJumpAroundEmpty || isJumpToJoinFree; // We do not optimize jumps between two different try regions. 
// However jumping to a block that is not in any try region is OK // if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) { optimizeJump = false; } // Also consider bNext's try region // if (bNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bNext)) { optimizeJump = false; } // If we are optimizing using real profile weights // then don't optimize a conditional jump to an unconditional jump // until after we have computed the edge weights // if (fgIsUsingProfileWeights()) { // if block and bdest are in different hot/cold regions we can't do this this optimization // because we can't allow fall-through into the cold region. if (!fgEdgeWeightsComputed || fgInDifferentRegions(block, bDest)) { fgNeedsUpdateFlowGraph = true; optimizeJump = false; } } if (optimizeJump && isJumpToJoinFree) { // In the join free case, we also need to move bDest right after bNext // to create same flow as in the isJumpAroundEmpty case. // if (!fgEhAllowsMoveBlock(bNext, bDest) || bDest->isBBCallAlwaysPair()) { optimizeJump = false; } else { // We don't expect bDest to already be right after bNext. // assert(bDest != bNext->bbNext); JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, bNext->bbNum); // If bDest can fall through we'll need to create a jump // block after it too. Remember where to jump to. // BasicBlock* const bDestNext = bDest->bbNext; // Move bDest // if (ehIsBlockEHLast(bDest)) { ehUpdateLastBlocks(bDest, bDest->bbPrev); } fgUnlinkBlock(bDest); fgInsertBBafter(bNext, bDest); if (ehIsBlockEHLast(bNext)) { ehUpdateLastBlocks(bNext, bDest); } // Add fall through fixup block, if needed. // if (bDest->KindIs(BBJ_NONE, BBJ_COND)) { BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); bFixup->inheritWeight(bDestNext); bFixup->bbJumpDest = bDestNext; fgRemoveRefPred(bDestNext, bDest); fgAddRefPred(bFixup, bDest); fgAddRefPred(bDestNext, bFixup); } } } if (optimizeJump) { JITDUMP("\nReversing a conditional jump around an unconditional jump (" FMT_BB " -> " FMT_BB ", " FMT_BB " -> " FMT_BB ")\n", block->bbNum, bDest->bbNum, bNext->bbNum, bNextJumpDest->bbNum); // Reverse the jump condition // GenTree* test = block->lastNode(); noway_assert(test->OperIsConditionalJump()); if (test->OperGet() == GT_JTRUE) { GenTree* cond = gtReverseCond(test->AsOp()->gtOp1); assert(cond == test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. test->AsOp()->gtOp1 = cond; } else { gtReverseCond(test); } // Optimize the Conditional JUMP to go to the new target block->bbJumpDest = bNext->bbJumpDest; fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext)); /* Unlink bNext from the BasicBlock list; note that we can do this even though other blocks could jump to it - the reason is that elsewhere in this function we always redirect jumps to jumps to jump to the final label, so even if another block jumps to bNext it won't matter once we're done since any such jump will be redirected to the final target by the time we're done here. */ fgRemoveRefPred(bNext, block); fgUnlinkBlock(bNext); /* Mark the block as removed */ bNext->bbFlags |= BBF_REMOVED; // Update the loop table if we removed the bottom of a loop, for example. 
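                        // (For instance, if the removed bNext happened to be recorded as a
                        // loop's bottom block, the loop table entry would otherwise keep
                        // referring to a block that is no longer in the block list.)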
fgUpdateLoopsAfterCompacting(block, bNext); // If this block was aligned, unmark it bNext->unmarkLoopAlign(this DEBUG_ARG("Optimized jump")); // If this is the first Cold basic block update fgFirstColdBlock if (bNext == fgFirstColdBlock) { fgFirstColdBlock = bNext->bbNext; } // // If we removed the end of a try region or handler region // we will need to update ebdTryLast or ebdHndLast. // for (EHblkDsc* const HBtab : EHClauses(this)) { if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext)) { fgSkipRmvdBlocks(HBtab); } } // we optimized this JUMP - goto REPEAT to catch similar cases change = true; modified = true; #ifdef DEBUG if (verbose) { printf("\nAfter reversing the jump:\n"); fgDispBasicBlocks(verboseTrees); } #endif // DEBUG /* For a rare special case we cannot jump to REPEAT as jumping to REPEAT will cause us to delete 'block' because it currently appears to be unreachable. As it is a self loop that only has a single bbRef (itself) However since the unlinked bNext has additional bbRefs (that we will later connect to 'block'), it is not really unreachable. */ if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1)) { continue; } goto REPEAT; } } } // // Update the switch jump table such that it follows jumps to jumps: // if (block->bbJumpKind == BBJ_SWITCH) { if (fgOptimizeSwitchBranches(block)) { change = true; modified = true; goto REPEAT; } } noway_assert(!(block->bbFlags & BBF_REMOVED)); /* COMPACT blocks if possible */ if (fgCanCompactBlocks(block, bNext)) { fgCompactBlocks(block, bNext); /* we compacted two blocks - goto REPEAT to catch similar cases */ change = true; modified = true; goto REPEAT; } /* Remove unreachable or empty blocks - do not consider blocks marked BBF_DONT_REMOVE or genReturnBB block * These include first and last block of a TRY, exception handlers and RANGE_CHECK_FAIL THROW blocks */ if ((block->bbFlags & BBF_DONT_REMOVE) == BBF_DONT_REMOVE || block == genReturnBB) { bPrev = block; continue; } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't remove the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. 
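            // (On ARM the paired BBJ_ALWAYS exists solely for the unwinder -- see the note
            // in fgComputeEnterBlocksSet -- so it can have no ordinary incoming flow edges
            // and would otherwise look unreachable here.)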
if (block->countOfInEdges() == 0 && bPrev->bbJumpKind == BBJ_CALLFINALLY) { assert(bPrev->isBBCallAlwaysPair()); noway_assert(!(bPrev->bbFlags & BBF_RETLESS_CALL)); noway_assert(block->bbJumpKind == BBJ_ALWAYS); bPrev = block; continue; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) noway_assert(!block->bbCatchTyp); noway_assert(!(block->bbFlags & BBF_TRY_BEG)); /* Remove unreachable blocks * * We'll look for blocks that have countOfInEdges() = 0 (blocks may become * unreachable due to a BBJ_ALWAYS introduced by conditional folding for example) */ if (block->countOfInEdges() == 0) { /* no references -> unreachable - remove it */ /* For now do not update the bbNum, do it at the end */ fgRemoveBlock(block, /* unreachable */ true); change = true; modified = true; /* we removed the current block - the rest of the optimizations won't have a target * continue with the next one */ continue; } else if (block->countOfInEdges() == 1) { switch (block->bbJumpKind) { case BBJ_COND: case BBJ_ALWAYS: if (block->bbJumpDest == block) { fgRemoveBlock(block, /* unreachable */ true); change = true; modified = true; /* we removed the current block - the rest of the optimizations * won't have a target so continue with the next block */ continue; } break; default: break; } } noway_assert(!(block->bbFlags & BBF_REMOVED)); /* Remove EMPTY blocks */ if (block->isEmpty()) { assert(bPrev == block->bbPrev); if (fgOptimizeEmptyBlock(block)) { change = true; modified = true; } /* Have we removed the block? */ if (block->bbFlags & BBF_REMOVED) { /* block was removed - no change to bPrev */ continue; } } /* Set the predecessor of the last reachable block * If we removed the current block, the predecessor remains unchanged * otherwise, since the current block is ok, it becomes the predecessor */ noway_assert(!(block->bbFlags & BBF_REMOVED)); bPrev = block; } } while (change); fgNeedsUpdateFlowGraph = false; #ifdef DEBUG if (verbose && modified) { printf("\nAfter updating the flow graph:\n"); fgDispBasicBlocks(verboseTrees); fgDispHandlerTab(); } if (compRationalIRForm) { for (BasicBlock* const block : Blocks()) { LIR::AsRange(block).CheckLIR(this); } } fgVerifyHandlerTab(); // Make sure that the predecessor lists are accurate fgDebugCheckBBlist(); fgDebugCheckUpdate(); #endif // DEBUG return modified; } #ifdef _PREFAST_ #pragma warning(pop) #endif //------------------------------------------------------------- // fgGetCodeEstimate: Compute a code size estimate for the block, including all statements // and block control flow. 
// // Arguments: // block - block to consider // // Returns: // Code size estimate for block // unsigned Compiler::fgGetCodeEstimate(BasicBlock* block) { unsigned costSz = 0; // estimate of block's code size cost switch (block->bbJumpKind) { case BBJ_NONE: costSz = 0; break; case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_LEAVE: case BBJ_COND: costSz = 2; break; case BBJ_CALLFINALLY: costSz = 5; break; case BBJ_SWITCH: costSz = 10; break; case BBJ_THROW: costSz = 1; // We place a int3 after the code for a throw block break; case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: costSz = 1; break; case BBJ_RETURN: // return from method costSz = 3; break; default: noway_assert(!"Bad bbJumpKind"); break; } for (Statement* const stmt : block->NonPhiStatements()) { unsigned char cost = stmt->GetCostSz(); costSz += cost; } return costSz; } #ifdef FEATURE_JIT_METHOD_PERF //------------------------------------------------------------------------ // fgMeasureIR: count and return the number of IR nodes in the function. // unsigned Compiler::fgMeasureIR() { unsigned nodeCount = 0; for (BasicBlock* const block : Blocks()) { if (!block->IsLIR()) { for (Statement* const stmt : block->Statements()) { fgWalkTreePre(stmt->GetRootNodePointer(), [](GenTree** slot, fgWalkData* data) -> Compiler::fgWalkResult { (*reinterpret_cast<unsigned*>(data->pCallbackData))++; return Compiler::WALK_CONTINUE; }, &nodeCount); } } else { for (GenTree* node : LIR::AsRange(block)) { nodeCount++; } } } return nodeCount; } #endif // FEATURE_JIT_METHOD_PERF //------------------------------------------------------------------------ // fgCompDominatedByExceptionalEntryBlocks: compute blocks that are // dominated by not normal entry. // void Compiler::fgCompDominatedByExceptionalEntryBlocks() { assert(fgEnterBlksSetValid); if (BlockSetOps::Count(this, fgEnterBlks) != 1) // There are exception entries. { for (unsigned i = 1; i <= fgBBNumMax; ++i) { BasicBlock* block = fgBBInvPostOrder[i]; if (BlockSetOps::IsMember(this, fgEnterBlks, block->bbNum)) { if (fgFirstBB != block) // skip the normal entry. { block->SetDominatedByExceptionalEntryFlag(); } } else if (block->bbIDom->IsDominatedByExceptionalEntryFlag()) { block->SetDominatedByExceptionalEntryFlag(); } } } }
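
// Illustrative note for fgCompDominatedByExceptionalEntryBlocks (hypothetical block
// numbers): if fgEnterBlks is { BB01 (the normal entry), BB05 (a handler entry) }, then
// BB05 is flagged directly because it is an enter block other than fgFirstBB, and any
// later block whose bbIDom already carries the flag inherits it, so the flag propagates
// down the dominator subtree rooted at BB05 while blocks dominated only by BB01 stay
// unflagged.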
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "lower.h" // for LowerRange() // Flowgraph Optimization //------------------------------------------------------------------------ // fgDominate: Returns true if block `b1` dominates block `b2`. // // Arguments: // b1, b2 -- Two blocks to compare. // // Return Value: // true if `b1` dominates `b2`. If either b1 or b2 were created after dominators were calculated, // but the dominator information still exists, try to determine if we can make a statement about // b1 dominating b2 based on existing dominator information and other information, such as // predecessor lists or loop information. // // Assumptions: // -- Dominators have been calculated (`fgDomsComputed` is true). // bool Compiler::fgDominate(BasicBlock* b1, BasicBlock* b2) { noway_assert(fgDomsComputed); assert(!fgCheapPredsValid); // // If the fgModified flag is false then we made some modifications to // the flow graph, like adding a new block or changing a conditional branch // into an unconditional branch. // // We can continue to use the dominator and reachable information to // unmark loops as long as we haven't renumbered the blocks or we aren't // asking for information about a new block. // if (b2->bbNum > fgDomBBcount) { if (b1 == b2) { return true; } for (BasicBlock* const predBlock : b2->PredBlocks()) { if (!fgDominate(b1, predBlock)) { return false; } } return b2->bbPreds != nullptr; } if (b1->bbNum > fgDomBBcount) { // If b1 is a loop preheader (that was created after the dominators were calculated), // then it has a single successor that is the loop entry, and it is the only non-loop // predecessor of the loop entry. Thus, b1 dominates the loop entry and also dominates // what the loop entry dominates. if (b1->bbFlags & BBF_LOOP_PREHEADER) { BasicBlock* loopEntry = b1->GetUniqueSucc(); assert(loopEntry != nullptr); return fgDominate(loopEntry, b2); } // unknown dominators; err on the safe side and return false return false; } /* Check if b1 dominates b2 */ unsigned numA = b1->bbNum; noway_assert(numA <= fgDomBBcount); unsigned numB = b2->bbNum; noway_assert(numB <= fgDomBBcount); // What we want to ask here is basically if A is in the middle of the path from B to the root (the entry node) // in the dominator tree. Turns out that can be translated as: // // A dom B <-> preorder(A) <= preorder(B) && postorder(A) >= postorder(B) // // where the equality holds when you ask if A dominates itself. bool treeDom = fgDomTreePreOrder[numA] <= fgDomTreePreOrder[numB] && fgDomTreePostOrder[numA] >= fgDomTreePostOrder[numB]; return treeDom; } //------------------------------------------------------------------------ // fgReachable: Returns true if block `b1` can reach block `b2`. // // Arguments: // b1, b2 -- Two blocks to compare. // // Return Value: // true if `b1` can reach `b2` via some path. If either b1 or b2 were created after dominators were calculated, // but the dominator information still exists, try to determine if we can make a statement about // b1 reaching b2 based on existing reachability information and other information, such as // predecessor lists. // // Assumptions: // -- Dominators have been calculated (`fgDomsComputed` is true). // -- Reachability information has been calculated (`fgReachabilitySetsValid` is true). 
// bool Compiler::fgReachable(BasicBlock* b1, BasicBlock* b2) { noway_assert(fgDomsComputed); assert(!fgCheapPredsValid); // // If the fgModified flag is false then we made some modifications to // the flow graph, like adding a new block or changing a conditional branch // into an unconditional branch. // // We can continue to use the dominator and reachable information to // unmark loops as long as we haven't renumbered the blocks or we aren't // asking for information about a new block // if (b2->bbNum > fgDomBBcount) { if (b1 == b2) { return true; } for (BasicBlock* const predBlock : b2->PredBlocks()) { if (fgReachable(b1, predBlock)) { return true; } } return false; } if (b1->bbNum > fgDomBBcount) { noway_assert(b1->KindIs(BBJ_NONE, BBJ_ALWAYS, BBJ_COND)); if (b1->KindIs(BBJ_NONE, BBJ_COND) && fgReachable(b1->bbNext, b2)) { return true; } if (b1->KindIs(BBJ_ALWAYS, BBJ_COND) && fgReachable(b1->bbJumpDest, b2)) { return true; } return false; } /* Check if b1 can reach b2 */ assert(fgReachabilitySetsValid); assert(BasicBlockBitSetTraits::GetSize(this) == fgDomBBcount + 1); return BlockSetOps::IsMember(this, b2->bbReach, b1->bbNum); } //------------------------------------------------------------------------ // fgUpdateChangedFlowGraph: Update changed flow graph information. // // If the flow graph has changed, we need to recompute various information if we want to use it again. // This does similar work to `fgComputeReachability`, but the caller can pick and choose what needs // to be recomputed if they know certain things do NOT need to be recomputed. // // Arguments: // computePreds -- `true` if we should recompute predecessors // computeDoms -- `true` if we should recompute dominators // computeReturnBlocks -- `true` if we should recompute the list of return blocks // computeLoops -- `true` if we should recompute the loop table // void Compiler::fgUpdateChangedFlowGraph(const bool computePreds, const bool computeDoms, const bool computeReturnBlocks, const bool computeLoops) { // We need to clear this so we don't hit an assert calling fgRenumberBlocks(). fgDomsComputed = false; if (computeReturnBlocks) { fgComputeReturnBlocks(); } JITDUMP("\nRenumbering the basic blocks for fgUpdateChangeFlowGraph\n"); fgRenumberBlocks(); if (computePreds) // This condition is only here until all phases don't require it. { fgComputePreds(); } fgComputeEnterBlocksSet(); fgComputeReachabilitySets(); if (computeDoms) { fgComputeDoms(); } if (computeLoops) { // Reset the loop info annotations and find the loops again. // Note: this is similar to `RecomputeLoopInfo`. optResetLoopInfo(); optSetBlockWeights(); optFindLoops(); } } //------------------------------------------------------------------------ // fgComputeReachabilitySets: Compute the bbReach sets. // // This can be called to recompute the bbReach sets after the flow graph changes, such as when the // number of BasicBlocks change (and thus, the BlockSet epoch changes). // // This also sets the BBF_GC_SAFE_POINT flag on blocks. // // TODO-Throughput: This algorithm consumes O(n^2) because we're using dense bitsets to // represent reachability. While this yields O(1) time queries, it bloats the memory usage // for large code. We can do better if we try to approach reachability by // computing the strongly connected components of the flow graph. That way we only need // linear memory to label every block with its SCC. // // Assumptions: // Assumes the predecessor lists are correct. 
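//
// Example:
//   (illustrative) For a simple chain BB01 -> BB02 -> BB03, the fixed point gives
//   bbReach(BB01) = { BB01 }, bbReach(BB02) = { BB01, BB02 }, and
//   bbReach(BB03) = { BB01, BB02, BB03 }. Adding a back edge BB03 -> BB02 would
//   additionally put BB03 into bbReach(BB02) on the next iteration.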
// void Compiler::fgComputeReachabilitySets() { assert(fgComputePredsDone); assert(!fgCheapPredsValid); #ifdef DEBUG fgReachabilitySetsValid = false; #endif // DEBUG for (BasicBlock* const block : Blocks()) { // Initialize the per-block bbReach sets. It creates a new empty set, // because the block epoch could change since the previous initialization // and the old set could have wrong size. block->bbReach = BlockSetOps::MakeEmpty(this); /* Mark block as reaching itself */ BlockSetOps::AddElemD(this, block->bbReach, block->bbNum); } // Find the reachable blocks. Also, set BBF_GC_SAFE_POINT. bool change; BlockSet newReach(BlockSetOps::MakeEmpty(this)); do { change = false; for (BasicBlock* const block : Blocks()) { BlockSetOps::Assign(this, newReach, block->bbReach); bool predGcSafe = (block->bbPreds != nullptr); // Do all of our predecessor blocks have a GC safe bit? for (BasicBlock* const predBlock : block->PredBlocks()) { /* Union the predecessor's reachability set into newReach */ BlockSetOps::UnionD(this, newReach, predBlock->bbReach); if (!(predBlock->bbFlags & BBF_GC_SAFE_POINT)) { predGcSafe = false; } } if (predGcSafe) { block->bbFlags |= BBF_GC_SAFE_POINT; } if (!BlockSetOps::Equal(this, newReach, block->bbReach)) { BlockSetOps::Assign(this, block->bbReach, newReach); change = true; } } } while (change); #ifdef DEBUG if (verbose) { printf("\nAfter computing reachability sets:\n"); fgDispReach(); } fgReachabilitySetsValid = true; #endif // DEBUG } //------------------------------------------------------------------------ // fgComputeReturnBlocks: Compute the set of BBJ_RETURN blocks. // // Initialize `fgReturnBlocks` to a list of the BBJ_RETURN blocks in the function. // void Compiler::fgComputeReturnBlocks() { fgReturnBlocks = nullptr; for (BasicBlock* const block : Blocks()) { // If this is a BBJ_RETURN block, add it to our list of all BBJ_RETURN blocks. This list is only // used to find return blocks. if (block->bbJumpKind == BBJ_RETURN) { fgReturnBlocks = new (this, CMK_Reachability) BasicBlockList(block, fgReturnBlocks); } } fgReturnBlocksComputed = true; #ifdef DEBUG if (verbose) { printf("Return blocks:"); if (fgReturnBlocks == nullptr) { printf(" NONE"); } else { for (const BasicBlockList* bl = fgReturnBlocks; bl != nullptr; bl = bl->next) { printf(" " FMT_BB, bl->block->bbNum); } } printf("\n"); } #endif // DEBUG } //------------------------------------------------------------------------ // fgComputeEnterBlocksSet: Compute the entry blocks set. // // Initialize fgEnterBlks to the set of blocks for which we don't have explicit control // flow edges. These are the entry basic block and each of the EH handler blocks. // For ARM, also include the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair, // to avoid creating "retless" calls, since we need the BBJ_ALWAYS for the purpose // of unwinding, even if the call doesn't return (due to an explicit throw, for example). 
// void Compiler::fgComputeEnterBlocksSet() { #ifdef DEBUG fgEnterBlksSetValid = false; #endif // DEBUG fgEnterBlks = BlockSetOps::MakeEmpty(this); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) fgAlwaysBlks = BlockSetOps::MakeEmpty(this); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) /* Now set the entry basic block */ BlockSetOps::AddElemD(this, fgEnterBlks, fgFirstBB->bbNum); assert(fgFirstBB->bbNum == 1); if (compHndBBtabCount > 0) { /* Also 'or' in the handler basic blocks */ for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->HasFilter()) { BlockSetOps::AddElemD(this, fgEnterBlks, HBtab->ebdFilter->bbNum); } BlockSetOps::AddElemD(this, fgEnterBlks, HBtab->ebdHndBeg->bbNum); } } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // For ARM code, prevent creating retless calls by adding the BBJ_ALWAYS to the "fgAlwaysBlks" list. for (BasicBlock* const block : Blocks()) { if (block->bbJumpKind == BBJ_CALLFINALLY) { assert(block->isBBCallAlwaysPair()); // Don't remove the BBJ_ALWAYS block that is only here for the unwinder. BlockSetOps::AddElemD(this, fgAlwaysBlks, block->bbNext->bbNum); } } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #ifdef DEBUG if (verbose) { printf("Enter blocks: "); BlockSetOps::Iter iter(this, fgEnterBlks); unsigned bbNum = 0; while (iter.NextElem(&bbNum)) { printf(FMT_BB " ", bbNum); } printf("\n"); } #endif // DEBUG #ifdef DEBUG fgEnterBlksSetValid = true; #endif // DEBUG } //------------------------------------------------------------------------ // fgRemoveUnreachableBlocks: Remove unreachable blocks. // // Some blocks (marked with BBF_DONT_REMOVE) can't be removed even if unreachable, in which case they // are converted to `throw` blocks. Internal throw helper blocks and the single return block (if any) // are never considered unreachable. // // Return Value: // Return true if changes were made that may cause additional blocks to be removable. // // Assumptions: // The reachability sets must be computed and valid. // bool Compiler::fgRemoveUnreachableBlocks() { assert(!fgCheapPredsValid); assert(fgReachabilitySetsValid); bool hasUnreachableBlocks = false; bool changed = false; /* Record unreachable blocks */ for (BasicBlock* const block : Blocks()) { /* Internal throw blocks are also reachable */ if (fgIsThrowHlpBlk(block)) { continue; } else if (block == genReturnBB) { // Don't remove statements for the genReturnBB block, as we might have special hookups there. // For example, the profiler hookup needs to have the "void GT_RETURN" statement // to properly set the info.compProfilerCallback flag. continue; } else { // If any of the entry blocks can reach this block, then we skip it. if (!BlockSetOps::IsEmptyIntersection(this, fgEnterBlks, block->bbReach)) { continue; } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (!BlockSetOps::IsEmptyIntersection(this, fgAlwaysBlks, block->bbReach)) { continue; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } // Remove all the code for the block fgUnreachableBlock(block); // Make sure that the block was marked as removed */ noway_assert(block->bbFlags & BBF_REMOVED); // Some blocks mark the end of trys and catches // and can't be removed. We convert these into // empty blocks of type BBJ_THROW if (block->bbFlags & BBF_DONT_REMOVE) { const bool bIsBBCallAlwaysPair = block->isBBCallAlwaysPair(); // Unmark the block as removed, clear BBF_INTERNAL, and set BBJ_IMPORTED // The successors may be unreachable after this change. 
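            // (Converting the block to an empty BBJ_THROW below drops its outgoing flow,
            // so a successor whose only remaining path from an entry block ran through
            // this block can become unreachable on a later pass -- hence the 'changed'
            // update that follows.)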
changed |= block->NumSucc() > 0; block->bbFlags &= ~(BBF_REMOVED | BBF_INTERNAL); block->bbFlags |= BBF_IMPORTED; block->bbJumpKind = BBJ_THROW; block->bbSetRunRarely(); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // If this is a <BBJ_CALLFINALLY, BBJ_ALWAYS> pair, we have to clear BBF_FINALLY_TARGET flag on // the target node (of BBJ_ALWAYS) since BBJ_CALLFINALLY node is getting converted to a BBJ_THROW. if (bIsBBCallAlwaysPair) { noway_assert(block->bbNext->bbJumpKind == BBJ_ALWAYS); fgClearFinallyTargetBit(block->bbNext->bbJumpDest); } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } else { /* We have to call fgRemoveBlock next */ hasUnreachableBlocks = true; changed = true; } } if (hasUnreachableBlocks) { // Now remove the unreachable blocks for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) { // If we marked a block with BBF_REMOVED then we need to call fgRemoveBlock() on it if (block->bbFlags & BBF_REMOVED) { fgRemoveBlock(block, /* unreachable */ true); // TODO: couldn't we have fgRemoveBlock() return the block after the (last)one removed // so we don't need the code below? // When we have a BBJ_CALLFINALLY, BBJ_ALWAYS pair; fgRemoveBlock will remove // both blocks, so we must advance 1 extra place in the block list // if (block->isBBCallAlwaysPair()) { block = block->bbNext; } } } } return changed; } //------------------------------------------------------------------------ // fgComputeReachability: Compute the dominator and reachable sets. // // Use `fgReachable()` to check reachability, `fgDominate()` to check dominance. // // Also, compute the list of return blocks `fgReturnBlocks` and set of enter blocks `fgEnterBlks`. // Delete unreachable blocks. // // Assumptions: // Assumes the predecessor lists are computed and correct. // void Compiler::fgComputeReachability() { #ifdef DEBUG if (verbose) { printf("*************** In fgComputeReachability\n"); } fgVerifyHandlerTab(); // Make sure that the predecessor lists are accurate assert(fgComputePredsDone); fgDebugCheckBBlist(); #endif // DEBUG fgComputeReturnBlocks(); // Compute reachability and then delete blocks determined to be unreachable. If we delete blocks, we // need to loop, as that might have caused more blocks to become unreachable. This can happen in the // case where a call to a finally is unreachable and deleted (maybe the call to the finally is // preceded by a throw or an infinite loop), making the blocks following the finally unreachable. // However, all EH entry blocks are considered global entry blocks, causing the blocks following the // call to the finally to stay rooted, until a second round of reachability is done. // The dominator algorithm expects that all blocks can be reached from the fgEnterBlks set. unsigned passNum = 1; bool changed; do { // Just to be paranoid, avoid infinite loops; fall back to minopts. if (passNum > 10) { noway_assert(!"Too many unreachable block removal loops"); } // Walk the flow graph, reassign block numbers to keep them in ascending order. JITDUMP("\nRenumbering the basic blocks for fgComputeReachability pass #%u\n", passNum); passNum++; fgRenumberBlocks(); // // Compute fgEnterBlks // fgComputeEnterBlocksSet(); // // Compute bbReach // fgComputeReachabilitySets(); // // Use reachability information to delete unreachable blocks. 
// changed = fgRemoveUnreachableBlocks(); } while (changed); #ifdef DEBUG if (verbose) { printf("\nAfter computing reachability:\n"); fgDispBasicBlocks(verboseTrees); printf("\n"); } fgVerifyHandlerTab(); fgDebugCheckBBlist(true); #endif // DEBUG // // Now, compute the dominators // fgComputeDoms(); } //------------------------------------------------------------- // fgDfsInvPostOrder: Helper function for computing dominance information. // // In order to be able to compute dominance, we need to first get a DFS reverse post order sort on the basic flow // graph for the dominance algorithm to operate correctly. The reason why we need the DFS sort is because we will // build the dominance sets using the partial order induced by the DFS sorting. With this precondition not // holding true, the algorithm doesn't work properly. // void Compiler::fgDfsInvPostOrder() { // NOTE: This algorithm only pays attention to the actual blocks. It ignores the imaginary entry block. // visited : Once we run the DFS post order sort recursive algorithm, we mark the nodes we visited to avoid // backtracking. BlockSet visited(BlockSetOps::MakeEmpty(this)); // We begin by figuring out which basic blocks don't have incoming edges and mark them as // start nodes. Later on we run the recursive algorithm for each node that we // mark in this step. BlockSet_ValRet_T startNodes = fgDomFindStartNodes(); // Make sure fgEnterBlks are still there in startNodes, even if they participate in a loop (i.e., there is // an incoming edge into the block). assert(fgEnterBlksSetValid); BlockSetOps::UnionD(this, startNodes, fgEnterBlks); assert(BlockSetOps::IsMember(this, startNodes, fgFirstBB->bbNum)); // Call the flowgraph DFS traversal helper. unsigned postIndex = 1; for (BasicBlock* const block : Blocks()) { // If the block has no predecessors, and we haven't already visited it (because it's in fgEnterBlks but also // reachable from the first block), go ahead and traverse starting from this block. if (BlockSetOps::IsMember(this, startNodes, block->bbNum) && !BlockSetOps::IsMember(this, visited, block->bbNum)) { fgDfsInvPostOrderHelper(block, visited, &postIndex); } } // After the DFS reverse postorder is completed, we must have visited all the basic blocks. noway_assert(postIndex == fgBBcount + 1); noway_assert(fgBBNumMax == fgBBcount); #ifdef DEBUG if (0 && verbose) { printf("\nAfter doing a post order traversal of the BB graph, this is the ordering:\n"); for (unsigned i = 1; i <= fgBBNumMax; ++i) { printf("%02u -> " FMT_BB "\n", i, fgBBInvPostOrder[i]->bbNum); } printf("\n"); } #endif // DEBUG } //------------------------------------------------------------- // fgDomFindStartNodes: Helper for dominance computation to find the start nodes block set. // // The start nodes is a set that represents which basic blocks in the flow graph don't have incoming edges. // We begin assuming everything is a start block and remove any block that is a successor of another. // // Returns: // Block set of start nodes. 
// BlockSet_ValRet_T Compiler::fgDomFindStartNodes() { BlockSet startNodes(BlockSetOps::MakeFull(this)); for (BasicBlock* const block : Blocks()) { for (BasicBlock* const succ : block->Succs(this)) { BlockSetOps::RemoveElemD(this, startNodes, succ->bbNum); } } #ifdef DEBUG if (verbose) { printf("\nDominator computation start blocks (those blocks with no incoming edges):\n"); BlockSetOps::Iter iter(this, startNodes); unsigned bbNum = 0; while (iter.NextElem(&bbNum)) { printf(FMT_BB " ", bbNum); } printf("\n"); } #endif // DEBUG return startNodes; } //------------------------------------------------------------------------ // fgDfsInvPostOrderHelper: Helper to assign post-order numbers to blocks. // // Arguments: // block - The starting entry block // visited - The set of visited blocks // count - Pointer to the Dfs counter // // Notes: // Compute a non-recursive DFS traversal of the flow graph using an // evaluation stack to assign post-order numbers. // void Compiler::fgDfsInvPostOrderHelper(BasicBlock* block, BlockSet& visited, unsigned* count) { // Assume we haven't visited this node yet (callers ensure this). assert(!BlockSetOps::IsMember(this, visited, block->bbNum)); // Allocate a local stack to hold the DFS traversal actions necessary // to compute pre/post-ordering of the control flowgraph. ArrayStack<DfsBlockEntry> stack(getAllocator(CMK_ArrayStack)); // Push the first block on the stack to seed the traversal. stack.Push(DfsBlockEntry(DSS_Pre, block)); // Flag the node we just visited to avoid backtracking. BlockSetOps::AddElemD(this, visited, block->bbNum); // The search is terminated once all the actions have been processed. while (!stack.Empty()) { DfsBlockEntry current = stack.Pop(); BasicBlock* currentBlock = current.dfsBlock; if (current.dfsStackState == DSS_Pre) { // This is a pre-visit that corresponds to the first time the // node is encountered in the spanning tree and receives pre-order // numberings. By pushing the post-action on the stack here we // are guaranteed to only process it after all of its successors // pre and post actions are processed. stack.Push(DfsBlockEntry(DSS_Post, currentBlock)); for (BasicBlock* const succ : currentBlock->Succs(this)) { // If this is a node we haven't seen before, go ahead and process if (!BlockSetOps::IsMember(this, visited, succ->bbNum)) { // Push a pre-visit action for this successor onto the stack and // mark it as visited in case this block has multiple successors // to the same node (multi-graph). stack.Push(DfsBlockEntry(DSS_Pre, succ)); BlockSetOps::AddElemD(this, visited, succ->bbNum); } } } else { // This is a post-visit that corresponds to the last time the // node is visited in the spanning tree and only happens after // all descendents in the spanning tree have had pre and post // actions applied. assert(current.dfsStackState == DSS_Post); unsigned invCount = fgBBcount - *count + 1; assert(1 <= invCount && invCount <= fgBBNumMax); fgBBInvPostOrder[invCount] = currentBlock; currentBlock->bbPostOrderNum = invCount; ++(*count); } } } //------------------------------------------------------------------------ // fgComputeDoms: Computer dominators. Use `fgDominate()` to check dominance. // // Compute immediate dominators, the dominator tree and and its pre/post-order traversal numbers. // // Also sets BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY flag on blocks dominated by exceptional entry blocks. // // Notes: // Immediate dominator computation is based on "A Simple, Fast Dominance Algorithm" // by Keith D. Cooper, Timothy J. 
Harvey, and Ken Kennedy. // void Compiler::fgComputeDoms() { assert(!fgCheapPredsValid); #ifdef DEBUG if (verbose) { printf("*************** In fgComputeDoms\n"); } fgVerifyHandlerTab(); // Make sure that the predecessor lists are accurate. // Also check that the blocks are properly, densely numbered (so calling fgRenumberBlocks is not necessary). fgDebugCheckBBlist(true); // Assert things related to the BlockSet epoch. assert(fgBBcount == fgBBNumMax); assert(BasicBlockBitSetTraits::GetSize(this) == fgBBNumMax + 1); #endif // DEBUG BlockSet processedBlks(BlockSetOps::MakeEmpty(this)); fgBBInvPostOrder = new (this, CMK_DominatorMemory) BasicBlock*[fgBBNumMax + 1]{}; fgDfsInvPostOrder(); noway_assert(fgBBInvPostOrder[0] == nullptr); // flRoot and bbRoot represent an imaginary unique entry point in the flow graph. // All the orphaned EH blocks and fgFirstBB will temporarily have its predecessors list // (with bbRoot as the only basic block in it) set as flRoot. // Later on, we clear their predecessors and let them to be nullptr again. // Since we number basic blocks starting at one, the imaginary entry block is conveniently numbered as zero. BasicBlock bbRoot; bbRoot.bbPreds = nullptr; bbRoot.bbNum = 0; bbRoot.bbIDom = &bbRoot; bbRoot.bbPostOrderNum = 0; bbRoot.bbFlags = BBF_EMPTY; flowList flRoot(&bbRoot, nullptr); fgBBInvPostOrder[0] = &bbRoot; // Mark both bbRoot and fgFirstBB processed BlockSetOps::AddElemD(this, processedBlks, 0); // bbRoot == block #0 BlockSetOps::AddElemD(this, processedBlks, 1); // fgFirstBB == block #1 assert(fgFirstBB->bbNum == 1); // Special case fgFirstBB to say its IDom is bbRoot. fgFirstBB->bbIDom = &bbRoot; BasicBlock* block = nullptr; for (block = fgFirstBB->bbNext; block != nullptr; block = block->bbNext) { // If any basic block has no predecessors then we flag it as processed and temporarily // mark its precedessor list to be flRoot. This makes the flowgraph connected, // a precondition that is needed by the dominance algorithm to operate properly. if (block->bbPreds == nullptr) { block->bbPreds = &flRoot; block->bbIDom = &bbRoot; BlockSetOps::AddElemD(this, processedBlks, block->bbNum); } else { block->bbIDom = nullptr; } } // Mark the EH blocks as entry blocks and also flag them as processed. if (compHndBBtabCount > 0) { for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->HasFilter()) { HBtab->ebdFilter->bbIDom = &bbRoot; BlockSetOps::AddElemD(this, processedBlks, HBtab->ebdFilter->bbNum); } HBtab->ebdHndBeg->bbIDom = &bbRoot; BlockSetOps::AddElemD(this, processedBlks, HBtab->ebdHndBeg->bbNum); } } // Now proceed to compute the immediate dominators for each basic block. bool changed = true; while (changed) { changed = false; // Process each actual block; don't process the imaginary predecessor block. for (unsigned i = 1; i <= fgBBNumMax; ++i) { flowList* first = nullptr; BasicBlock* newidom = nullptr; block = fgBBInvPostOrder[i]; // If we have a block that has bbRoot as its bbIDom // it means we flag it as processed and as an entry block so // in this case we're all set. if (block->bbIDom == &bbRoot) { continue; } // Pick up the first processed predecesor of the current block. for (first = block->bbPreds; first != nullptr; first = first->flNext) { if (BlockSetOps::IsMember(this, processedBlks, first->getBlock()->bbNum)) { break; } } noway_assert(first != nullptr); // We assume the first processed predecessor will be the // immediate dominator and then compute the forward flow analysis. 
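            // Illustrative sketch (hypothetical block numbers): for a diamond
            //
            //          BB01
            //         /    \
            //      BB02    BB03
            //         \    /
            //          BB04
            //
            // when BB04 is processed, its first processed predecessor (say BB02) seeds
            // newidom; intersecting with BB03, whose bbIDom is BB01, walks both blocks up
            // the bbIDom chain by postorder number until they meet at BB01, so
            // BB04->bbIDom becomes BB01 -- the lowest common ancestor in the dominator tree.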
newidom = first->getBlock(); for (flowList* p = block->bbPreds; p != nullptr; p = p->flNext) { if (p->getBlock() == first->getBlock()) { continue; } if (p->getBlock()->bbIDom != nullptr) { // fgIntersectDom is basically the set intersection between // the dominance sets of the new IDom and the current predecessor // Since the nodes are ordered in DFS inverse post order and // IDom induces a tree, fgIntersectDom actually computes // the lowest common ancestor in the dominator tree. newidom = fgIntersectDom(p->getBlock(), newidom); } } // If the Immediate dominator changed, assign the new one // to the current working basic block. if (block->bbIDom != newidom) { noway_assert(newidom != nullptr); block->bbIDom = newidom; changed = true; } BlockSetOps::AddElemD(this, processedBlks, block->bbNum); } } // As stated before, once we have computed immediate dominance we need to clear // all the basic blocks whose predecessor list was set to flRoot. This // reverts that and leaves the blocks the same as before. for (BasicBlock* const block : Blocks()) { if (block->bbPreds == &flRoot) { block->bbPreds = nullptr; } } fgCompDominatedByExceptionalEntryBlocks(); #ifdef DEBUG if (verbose) { fgDispDoms(); } #endif fgNumberDomTree(fgBuildDomTree()); fgModified = false; fgDomBBcount = fgBBcount; assert(fgBBcount == fgBBNumMax); assert(BasicBlockBitSetTraits::GetSize(this) == fgDomBBcount + 1); fgDomsComputed = true; } //------------------------------------------------------------------------ // fgBuildDomTree: Build the dominator tree for the current flowgraph. // // Returns: // An array of dominator tree nodes, indexed by BasicBlock::bbNum. // // Notes: // Immediate dominators must have already been computed in BasicBlock::bbIDom // before calling this. // DomTreeNode* Compiler::fgBuildDomTree() { JITDUMP("\nInside fgBuildDomTree\n"); unsigned bbArraySize = fgBBNumMax + 1; DomTreeNode* domTree = new (this, CMK_DominatorMemory) DomTreeNode[bbArraySize]{}; BasicBlock* imaginaryRoot = fgFirstBB->bbIDom; if (imaginaryRoot != nullptr) { // If the first block has a dominator then this must be the imaginary entry block added // by fgComputeDoms, it is not actually part of the flowgraph and should have number 0. assert(imaginaryRoot->bbNum == 0); assert(imaginaryRoot->bbIDom == imaginaryRoot); // Clear the imaginary dominator to turn the tree back to a forest. fgFirstBB->bbIDom = nullptr; } // If the imaginary root is present then we'll need to create a forest instead of a tree. // Forest roots are chained via DomTreeNode::nextSibling and we keep track of this list's // tail in order to append to it. The head of the list is fgFirstBB, by construction. BasicBlock* rootListTail = fgFirstBB; // Traverse the entire block list to build the dominator tree. Skip fgFirstBB // as it is always a root of the dominator forest. for (BasicBlock* const block : Blocks(fgFirstBB->bbNext)) { BasicBlock* parent = block->bbIDom; if (parent != imaginaryRoot) { assert(block->bbNum < bbArraySize); assert(parent->bbNum < bbArraySize); domTree[block->bbNum].nextSibling = domTree[parent->bbNum].firstChild; domTree[parent->bbNum].firstChild = block; } else if (imaginaryRoot != nullptr) { assert(rootListTail->bbNum < bbArraySize); domTree[rootListTail->bbNum].nextSibling = block; rootListTail = block; // Clear the imaginary dominator to turn the tree back to a forest. 
block->bbIDom = nullptr; } } JITDUMP("\nAfter computing the Dominance Tree:\n"); DBEXEC(verbose, fgDispDomTree(domTree)); return domTree; } #ifdef DEBUG void Compiler::fgDispDomTree(DomTreeNode* domTree) { for (unsigned i = 1; i <= fgBBNumMax; ++i) { if (domTree[i].firstChild != nullptr) { printf(FMT_BB " : ", i); for (BasicBlock* child = domTree[i].firstChild; child != nullptr; child = domTree[child->bbNum].nextSibling) { printf(FMT_BB " ", child->bbNum); } printf("\n"); } } printf("\n"); } #endif // DEBUG //------------------------------------------------------------------------ // fgNumberDomTree: Assign pre/post-order numbers to the dominator tree. // // Arguments: // domTree - The dominator tree node array // // Notes: // Runs a non-recursive DFS traversal of the dominator tree to assign // pre-order and post-order numbers. These numbers are used to provide // constant time lookup ancestor/descendent tests between pairs of nodes // in the tree. // void Compiler::fgNumberDomTree(DomTreeNode* domTree) { class NumberDomTreeVisitor : public DomTreeVisitor<NumberDomTreeVisitor> { unsigned m_preNum; unsigned m_postNum; public: NumberDomTreeVisitor(Compiler* compiler, DomTreeNode* domTree) : DomTreeVisitor(compiler, domTree) { } void Begin() { unsigned bbArraySize = m_compiler->fgBBNumMax + 1; m_compiler->fgDomTreePreOrder = new (m_compiler, CMK_DominatorMemory) unsigned[bbArraySize]{}; m_compiler->fgDomTreePostOrder = new (m_compiler, CMK_DominatorMemory) unsigned[bbArraySize]{}; // The preorder and postorder numbers. // We start from 1 to match the bbNum ordering. m_preNum = 1; m_postNum = 1; } void PreOrderVisit(BasicBlock* block) { m_compiler->fgDomTreePreOrder[block->bbNum] = m_preNum++; } void PostOrderVisit(BasicBlock* block) { m_compiler->fgDomTreePostOrder[block->bbNum] = m_postNum++; } void End() { noway_assert(m_preNum == m_compiler->fgBBNumMax + 1); noway_assert(m_postNum == m_compiler->fgBBNumMax + 1); noway_assert(m_compiler->fgDomTreePreOrder[0] == 0); // Unused first element noway_assert(m_compiler->fgDomTreePostOrder[0] == 0); // Unused first element noway_assert(m_compiler->fgDomTreePreOrder[1] == 1); // First block should be first in pre order #ifdef DEBUG if (m_compiler->verbose) { printf("\nAfter numbering the dominator tree:\n"); for (unsigned i = 1; i <= m_compiler->fgBBNumMax; ++i) { printf(FMT_BB ": pre=%02u, post=%02u\n", i, m_compiler->fgDomTreePreOrder[i], m_compiler->fgDomTreePostOrder[i]); } } #endif // DEBUG } }; NumberDomTreeVisitor visitor(this, domTree); visitor.WalkTree(); } //------------------------------------------------------------- // fgIntersectDom: Intersect two immediate dominator sets. // // Find the lowest common ancestor in the dominator tree between two basic blocks. The LCA in the dominance tree // represents the closest dominator between the two basic blocks. Used to adjust the IDom value in fgComputDoms. // // Arguments: // a, b - two blocks to intersect // // Returns: // The least common ancestor of `a` and `b` in the IDom tree. // BasicBlock* Compiler::fgIntersectDom(BasicBlock* a, BasicBlock* b) { BasicBlock* finger1 = a; BasicBlock* finger2 = b; while (finger1 != finger2) { while (finger1->bbPostOrderNum > finger2->bbPostOrderNum) { finger1 = finger1->bbIDom; } while (finger2->bbPostOrderNum > finger1->bbPostOrderNum) { finger2 = finger2->bbIDom; } } return finger1; } //------------------------------------------------------------- // fgGetDominatorSet: Return a set of blocks that dominate `block`. 
// // Note: this is slow compared to calling fgDominate(), especially if doing a single check comparing // two blocks. // // Arguments: // block - get the set of blocks which dominate this block // // Returns: // A set of blocks which dominate `block`. // BlockSet_ValRet_T Compiler::fgGetDominatorSet(BasicBlock* block) { assert(block != nullptr); BlockSet domSet(BlockSetOps::MakeEmpty(this)); do { BlockSetOps::AddElemD(this, domSet, block->bbNum); if (block == block->bbIDom) { break; // We found a cycle in the IDom list, so we're done. } block = block->bbIDom; } while (block != nullptr); return domSet; } //------------------------------------------------------------- // fgInitBlockVarSets: Initialize the per-block variable sets (used for liveness analysis). // // Notes: // Initializes: // bbVarUse, bbVarDef, bbLiveIn, bbLiveOut, // bbMemoryUse, bbMemoryDef, bbMemoryLiveIn, bbMemoryLiveOut, // bbScope // void Compiler::fgInitBlockVarSets() { for (BasicBlock* const block : Blocks()) { block->InitVarSets(this); } fgBBVarSetsInited = true; } //------------------------------------------------------------------------ // fgPostImportationCleanups: clean up flow graph after importation // // Notes: // // Find and remove any basic blocks that are useless (e.g. they have not been // imported because they are not reachable, or they have been optimized away). // // Remove try regions where no blocks in the try were imported. // Update the end of try and handler regions where trailing blocks were not imported. // Update the start of try regions that were partially imported (OSR) // // For OSR, add "step blocks" and conditional logic to ensure the path from // method entry to the OSR logical entry point always flows through the first // block of any enclosing try. // // In particular, given a method like // // S0; // try { // S1; // try { // S2; // for (...) {} // OSR logical entry here // } // } // // Where the Sn are arbitrary hammocks of code, the OSR logical entry point // would be in the middle of a nested try. We can't branch there directly // from the OSR method entry. So we transform the flow to: // // _firstCall = 0; // goto pt1; // S0; // pt1: // try { // if (_firstCall == 0) goto pt2; // S1; // pt2: // try { // if (_firstCall == 0) goto pp; // S2; // pp: // _firstCall = 1; // for (...) // } // } // // where the "state variable" _firstCall guides execution appropriately // from OSR method entry, and flow always enters the try blocks at the // first block of the try. // void Compiler::fgPostImportationCleanup() { JITDUMP("\n*************** In fgPostImportationCleanup\n"); BasicBlock* cur; BasicBlock* nxt; // If we remove any blocks, we'll have to do additional work unsigned removedBlks = 0; for (cur = fgFirstBB; cur != nullptr; cur = nxt) { // Get hold of the next block (in case we delete 'cur') nxt = cur->bbNext; // Should this block be removed? if (!(cur->bbFlags & BBF_IMPORTED)) { noway_assert(cur->isEmpty()); if (ehCanDeleteEmptyBlock(cur)) { JITDUMP(FMT_BB " was not imported, marking as removed (%d)\n", cur->bbNum, removedBlks); cur->bbFlags |= BBF_REMOVED; removedBlks++; // Drop the block from the list. // // We rely on the fact that this does not clear out // cur->bbNext or cur->bbPrev in the code that // follows. fgUnlinkBlock(cur); } else { // We were prevented from deleting this block by EH // normalization. Mark the block as imported. cur->bbFlags |= BBF_IMPORTED; } } } // If no blocks were removed, we're done. // Unless we are an OSR method with a try entry. 
// if ((removedBlks == 0) && !(opts.IsOSR() && fgOSREntryBB->hasTryIndex())) { return; } // Update all references in the exception handler table. // // We may have made the entire try block unreachable. // Check for this case and remove the entry from the EH table. // // For OSR, just the initial part of a try range may become // unreachable; if so we need to shrink the try range down // to the portion that was imported. unsigned XTnum; EHblkDsc* HBtab; unsigned delCnt = 0; // Walk the EH regions from inner to outer for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { AGAIN: // If start of a try region was not imported, then we either // need to trim the region extent, or remove the region // entirely. // // In normal importation, it is not valid to jump into the // middle of a try, so if the try entry was not imported, the // entire try can be removed. // // In OSR importation the entry patchpoint may be in the // middle of a try, and we need to determine how much of the // try ended up getting imported. Because of backwards // branches we may end up importing the entire try even though // execution starts in the middle. // // Note it is common in both cases for the ends of trys (and // associated handlers) to end up not getting imported, so if // the try region is not removed, we always check if we need // to trim the ends. // if (HBtab->ebdTryBeg->bbFlags & BBF_REMOVED) { // Usual case is that the entire try can be removed. bool removeTryRegion = true; if (opts.IsOSR()) { // For OSR we may need to trim the try region start. // // We rely on the fact that removed blocks have been snipped from // the main block list, but that those removed blocks have kept // their bbprev (and bbnext) links. // // Find the first unremoved block before the try entry block. // BasicBlock* const oldTryEntry = HBtab->ebdTryBeg; BasicBlock* tryEntryPrev = oldTryEntry->bbPrev; while ((tryEntryPrev != nullptr) && ((tryEntryPrev->bbFlags & BBF_REMOVED) != 0)) { tryEntryPrev = tryEntryPrev->bbPrev; } // Because we've added an unremovable scratch block as // fgFirstBB, this backwards walk should always find // some block. assert(tryEntryPrev != nullptr); // If there is a next block of this prev block, and that block is // contained in the current try, we'd like to make that block // the new start of the try, and keep the region. BasicBlock* newTryEntry = tryEntryPrev->bbNext; bool updateTryEntry = false; if ((newTryEntry != nullptr) && bbInTryRegions(XTnum, newTryEntry)) { // We want to trim the begin extent of the current try region to newTryEntry. // // This method is invoked after EH normalization, so we may need to ensure all // try regions begin at blocks that are not the start or end of some other try. // // So, see if this block is already the start or end of some other EH region. if (bbIsTryBeg(newTryEntry)) { // We've already end-trimmed the inner try. Do the same now for the // current try, so it is easier to detect when they mutually protect. // (we will call this again later, which is harmless). fgSkipRmvdBlocks(HBtab); // If this try and the inner try form a "mutually protected try region" // then we must continue to share the try entry block. 
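                        // (Illustrative case: a single try with two catch handlers is
                        // typically described by two EH table entries whose try ranges
                        // protect the same blocks, so both entries must keep referring to
                        // the same try entry block.)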
EHblkDsc* const HBinner = ehGetBlockTryDsc(newTryEntry); assert(HBinner->ebdTryBeg == newTryEntry); if (HBtab->ebdTryLast != HBinner->ebdTryLast) { updateTryEntry = true; } } // Also, a try and handler cannot start at the same block else if (bbIsHandlerBeg(newTryEntry)) { updateTryEntry = true; } if (updateTryEntry) { // We need to trim the current try to begin at a different block. Normally // this would be problematic as we don't have enough context to redirect // all the incoming edges, but we know oldTryEntry is unreachable. // So there are no incoming edges to worry about. // assert(!tryEntryPrev->bbFallsThrough()); // What follows is similar to fgNewBBInRegion, but we can't call that // here as the oldTryEntry is no longer in the main bb list. newTryEntry = bbNewBasicBlock(BBJ_NONE); newTryEntry->bbFlags |= (BBF_IMPORTED | BBF_INTERNAL); // Set the right EH region indices on this new block. // // Patchpoints currently cannot be inside handler regions, // and so likewise the old and new try region entries. assert(!oldTryEntry->hasHndIndex()); newTryEntry->setTryIndex(XTnum); newTryEntry->clearHndIndex(); fgInsertBBafter(tryEntryPrev, newTryEntry); // Generally this (unreachable) empty new try entry block can fall through // to the next block, but in cases where there's a nested try with an // out of order handler, the next block may be a handler. So even though // this new try entry block is unreachable, we need to give it a // plausible flow target. Simplest is to just mark it as a throw. if (bbIsHandlerBeg(newTryEntry->bbNext)) { newTryEntry->bbJumpKind = BBJ_THROW; } JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to new " FMT_BB "\n", XTnum + delCnt, oldTryEntry->bbNum, newTryEntry->bbNum); } else { // We can just trim the try to newTryEntry as it is not part of some inner try or handler. JITDUMP("OSR: changing start of try region #%u from " FMT_BB " to " FMT_BB "\n", XTnum + delCnt, oldTryEntry->bbNum, newTryEntry->bbNum); } // Update the handler table fgSetTryBeg(HBtab, newTryEntry); // Try entry blocks get specially marked and have special protection. HBtab->ebdTryBeg->bbFlags |= BBF_DONT_REMOVE | BBF_TRY_BEG; // We are keeping this try region removeTryRegion = false; } } if (removeTryRegion) { // In the dump, refer to the region by its original index. JITDUMP("Try region #%u (" FMT_BB " -- " FMT_BB ") not imported, removing try from the EH table\n", XTnum + delCnt, HBtab->ebdTryBeg->bbNum, HBtab->ebdTryLast->bbNum); delCnt++; fgRemoveEHTableEntry(XTnum); if (XTnum < compHndBBtabCount) { // There are more entries left to process, so do more. Note that // HBtab now points to the next entry, that we copied down to the // current slot. XTnum also stays the same. goto AGAIN; } // no more entries (we deleted the last one), so exit the loop break; } } // If we get here, the try entry block was not removed. // Check some invariants. assert(HBtab->ebdTryBeg->bbFlags & BBF_IMPORTED); assert(HBtab->ebdTryBeg->bbFlags & BBF_DONT_REMOVE); assert(HBtab->ebdHndBeg->bbFlags & BBF_IMPORTED); assert(HBtab->ebdHndBeg->bbFlags & BBF_DONT_REMOVE); if (HBtab->HasFilter()) { assert(HBtab->ebdFilter->bbFlags & BBF_IMPORTED); assert(HBtab->ebdFilter->bbFlags & BBF_DONT_REMOVE); } // Finally, do region end trimming -- update try and handler ends to reflect removed blocks. fgSkipRmvdBlocks(HBtab); } // If this is OSR, and the OSR entry was mid-try or in a nested try entry, // add the appropriate step block logic. 
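    // (The code below materializes the "_firstCall" state variable from the header
    // comment: a temp that is zeroed at the method entry, set to one at the OSR entry,
    // and tested at each enclosing try entry so that, while it is still zero, control
    // is steered inward through every try entry block on its way to the OSR entry.)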
// if (opts.IsOSR()) { BasicBlock* const osrEntry = fgOSREntryBB; BasicBlock* entryJumpTarget = osrEntry; if (osrEntry->hasTryIndex()) { EHblkDsc* enclosingTry = ehGetBlockTryDsc(osrEntry); BasicBlock* tryEntry = enclosingTry->ebdTryBeg; bool const inNestedTry = (enclosingTry->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX); bool const osrEntryMidTry = (osrEntry != tryEntry); if (inNestedTry || osrEntryMidTry) { JITDUMP("OSR Entry point at IL offset 0x%0x (" FMT_BB ") is %s%s try region EH#%u\n", info.compILEntry, osrEntry->bbNum, osrEntryMidTry ? "within " : "at the start of ", inNestedTry ? "nested" : "", osrEntry->getTryIndex()); // We'll need a state variable to control the branching. // // It will be initialized to zero when the OSR method is entered and set to one // once flow reaches the osrEntry. // unsigned const entryStateVar = lvaGrabTemp(false DEBUGARG("OSR entry state var")); lvaTable[entryStateVar].lvType = TYP_INT; // Zero the entry state at method entry. // GenTree* const initEntryState = gtNewTempAssign(entryStateVar, gtNewZeroConNode(TYP_INT)); fgNewStmtAtBeg(fgFirstBB, initEntryState); // Set the state variable once control flow reaches the OSR entry. // GenTree* const setEntryState = gtNewTempAssign(entryStateVar, gtNewOneConNode(TYP_INT)); fgNewStmtAtBeg(osrEntry, setEntryState); // Helper method to add flow // auto addConditionalFlow = [this, entryStateVar, &entryJumpTarget](BasicBlock* fromBlock, BasicBlock* toBlock) { // We may have previously though this try entry was unreachable, but now we're going to // step through it on the way to the OSR entry. So ensure it has plausible profile weight. // if (fgHaveProfileData() && !fromBlock->hasProfileWeight()) { JITDUMP("Updating block weight for now-reachable try entry " FMT_BB " via " FMT_BB "\n", fromBlock->bbNum, fgFirstBB->bbNum); fromBlock->inheritWeight(fgFirstBB); } BasicBlock* const newBlock = fgSplitBlockAtBeginning(fromBlock); fromBlock->bbFlags |= BBF_INTERNAL; newBlock->bbFlags &= ~BBF_DONT_REMOVE; GenTree* const entryStateLcl = gtNewLclvNode(entryStateVar, TYP_INT); GenTree* const compareEntryStateToZero = gtNewOperNode(GT_EQ, TYP_INT, entryStateLcl, gtNewZeroConNode(TYP_INT)); GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero); fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero); fromBlock->bbJumpKind = BBJ_COND; fromBlock->bbJumpDest = toBlock; fgAddRefPred(toBlock, fromBlock); newBlock->inheritWeight(fromBlock); entryJumpTarget = fromBlock; }; // If this is a mid-try entry, add a conditional branch from the start of the try to osr entry point. // if (osrEntryMidTry) { addConditionalFlow(tryEntry, osrEntry); } // Add conditional branches for each successive enclosing try with a distinct // entry block. // while (enclosingTry->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) { EHblkDsc* const nextTry = ehGetDsc(enclosingTry->ebdEnclosingTryIndex); BasicBlock* const nextTryEntry = nextTry->ebdTryBeg; // We don't need to add flow for mutual-protect regions // (multiple tries that all share the same entry block). // if (nextTryEntry != tryEntry) { addConditionalFlow(nextTryEntry, tryEntry); } enclosingTry = nextTry; tryEntry = nextTryEntry; } // Transform the method entry flow, if necessary. // // Note even if the OSR is in a nested try, if it's a mutual protect try // it can be reached directly from "outside". 
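                // (At this point entryJumpTarget is either the outermost step block that
                // was created above, or still osrEntry if no step blocks were needed; the
                // method entry's jump is retargeted accordingly below.)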
// assert(fgFirstBB->bbJumpDest == osrEntry); assert(fgFirstBB->bbJumpKind == BBJ_ALWAYS); if (entryJumpTarget != osrEntry) { fgFirstBB->bbJumpDest = entryJumpTarget; fgRemoveRefPred(osrEntry, fgFirstBB); fgAddRefPred(entryJumpTarget, fgFirstBB); JITDUMP("OSR: redirecting flow from method entry " FMT_BB " to OSR entry " FMT_BB " via step blocks.\n", fgFirstBB->bbNum, fgOSREntryBB->bbNum); } else { JITDUMP("OSR: leaving direct flow from method entry " FMT_BB " to OSR entry " FMT_BB ", no step blocks needed.\n", fgFirstBB->bbNum, fgOSREntryBB->bbNum); } } else { // If OSR entry is the start of an un-nested try, no work needed. // // We won't hit this case today as we don't allow the try entry to be the target of a backedge, // and currently patchpoints only appear at targets of backedges. // JITDUMP("OSR Entry point at IL offset 0x%0x (" FMT_BB ") is start of an un-nested try region, no step blocks needed.\n", info.compILEntry, osrEntry->bbNum); assert(entryJumpTarget == osrEntry); assert(fgOSREntryBB == osrEntry); } } else { // If OSR entry is not within a try, no work needed. // JITDUMP("OSR Entry point at IL offset 0x%0x (" FMT_BB ") is not in a try region, no step blocks needed.\n", info.compILEntry, osrEntry->bbNum); assert(entryJumpTarget == osrEntry); assert(fgOSREntryBB == osrEntry); } } // Renumber the basic blocks JITDUMP("\nRenumbering the basic blocks for fgPostImporterCleanup\n"); fgRenumberBlocks(); #ifdef DEBUG fgVerifyHandlerTab(); #endif // DEBUG } //------------------------------------------------------------- // fgCanCompactBlocks: Determine if a block and its bbNext successor can be compacted. // // Arguments: // block - block to check. If nullptr, return false. // bNext - bbNext of `block`. If nullptr, return false. // // Returns: // true if compaction is allowed // bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) { if ((block == nullptr) || (bNext == nullptr)) { return false; } noway_assert(block->bbNext == bNext); if (block->bbJumpKind != BBJ_NONE) { return false; } // If the next block has multiple incoming edges, we can still compact if the first block is empty. // However, not if it is the beginning of a handler. if (bNext->countOfInEdges() != 1 && (!block->isEmpty() || (block->bbFlags & BBF_FUNCLET_BEG) || (block->bbCatchTyp != BBCT_NONE))) { return false; } if (bNext->bbFlags & BBF_DONT_REMOVE) { return false; } // Don't compact the first block if it was specially created as a scratch block. if (fgBBisScratch(block)) { return false; } // Don't compact away any loop entry blocks that we added in optCanonicalizeLoops if (optIsLoopEntry(block)) { return false; } #if defined(TARGET_ARM) // We can't compact a finally target block, as we need to generate special code for such blocks during code // generation if ((bNext->bbFlags & BBF_FINALLY_TARGET) != 0) return false; #endif // We don't want to compact blocks that are in different Hot/Cold regions // if (fgInDifferentRegions(block, bNext)) { return false; } // We cannot compact two blocks in different EH regions. // if (fgCanRelocateEHRegions) { if (!BasicBlock::sameEHRegion(block, bNext)) { return false; } } // If there is a switch predecessor don't bother because we'd have to update the uniquesuccs as well // (if they are valid). for (BasicBlock* const predBlock : bNext->PredBlocks()) { if (predBlock->bbJumpKind == BBJ_SWITCH) { return false; } } return true; } //------------------------------------------------------------- // fgCompactBlocks: Compact two blocks into one. 
// // Assumes that all necessary checks have been performed, i.e. fgCanCompactBlocks returns true. // // Uses for this function - whenever we change links, insert blocks, ... // It will keep the flowgraph data in synch - bbNum, bbRefs, bbPreds // // Arguments: // block - move all code into this block. // bNext - bbNext of `block`. This block will be removed. // void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) { noway_assert(block != nullptr); noway_assert((block->bbFlags & BBF_REMOVED) == 0); noway_assert(block->bbJumpKind == BBJ_NONE); noway_assert(bNext == block->bbNext); noway_assert(bNext != nullptr); noway_assert((bNext->bbFlags & BBF_REMOVED) == 0); noway_assert(bNext->countOfInEdges() == 1 || block->isEmpty()); noway_assert(bNext->bbPreds); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) noway_assert((bNext->bbFlags & BBF_FINALLY_TARGET) == 0); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Make sure the second block is not the start of a TRY block or an exception handler noway_assert(bNext->bbCatchTyp == BBCT_NONE); noway_assert((bNext->bbFlags & BBF_TRY_BEG) == 0); noway_assert((bNext->bbFlags & BBF_DONT_REMOVE) == 0); /* both or none must have an exception handler */ noway_assert(block->hasTryIndex() == bNext->hasTryIndex()); #ifdef DEBUG if (verbose) { printf("\nCompacting blocks " FMT_BB " and " FMT_BB ":\n", block->bbNum, bNext->bbNum); } #endif if (bNext->countOfInEdges() > 1) { JITDUMP("Second block has multiple incoming edges\n"); assert(block->isEmpty()); for (BasicBlock* const predBlock : bNext->PredBlocks()) { fgReplaceJumpTarget(predBlock, block, bNext); if (predBlock != block) { fgAddRefPred(block, predBlock); } } bNext->bbPreds = nullptr; // `block` can no longer be a loop pre-header (if it was before). block->bbFlags &= ~BBF_LOOP_PREHEADER; } else { noway_assert(bNext->bbPreds->flNext == nullptr); noway_assert(bNext->bbPreds->getBlock() == block); } /* Start compacting - move all the statements in the second block to the first block */ // First move any phi definitions of the second block after the phi defs of the first. // TODO-CQ: This may be the wrong thing to do. If we're compacting blocks, it's because a // control-flow choice was constant-folded away. So probably phi's need to go away, // as well, in favor of one of the incoming branches. Or at least be modified. assert(block->IsLIR() == bNext->IsLIR()); if (block->IsLIR()) { LIR::Range& blockRange = LIR::AsRange(block); LIR::Range& nextRange = LIR::AsRange(bNext); // Does the next block have any phis? GenTree* nextNode = nextRange.FirstNode(); // Does the block have any code? if (nextNode != nullptr) { LIR::Range nextNodes = nextRange.Remove(nextNode, nextRange.LastNode()); blockRange.InsertAtEnd(std::move(nextNodes)); } } else { Statement* blkNonPhi1 = block->FirstNonPhiDef(); Statement* bNextNonPhi1 = bNext->FirstNonPhiDef(); Statement* blkFirst = block->firstStmt(); Statement* bNextFirst = bNext->firstStmt(); // Does the second have any phis? if (bNextFirst != nullptr && bNextFirst != bNextNonPhi1) { Statement* bNextLast = bNextFirst->GetPrevStmt(); assert(bNextLast->GetNextStmt() == nullptr); // Does "blk" have phis? if (blkNonPhi1 != blkFirst) { // Yes, has phis. // Insert after the last phi of "block." // First, bNextPhis after last phi of block. 
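                // Roughly, with phis to the left of the '|' and non-phis to the right:
                //
                //     block: [ blkPhis | blkNonPhis ]     bNext: [ bNextPhis | bNextNonPhis ]
                //
                // becomes
                //
                //     block: [ blkPhis bNextPhis | blkNonPhis ]   bNext: [ bNextNonPhis ]
                //
                // and bNext's remaining non-phi statements are appended to block further below.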
Statement* blkLastPhi; if (blkNonPhi1 != nullptr) { blkLastPhi = blkNonPhi1->GetPrevStmt(); } else { blkLastPhi = blkFirst->GetPrevStmt(); } blkLastPhi->SetNextStmt(bNextFirst); bNextFirst->SetPrevStmt(blkLastPhi); // Now, rest of "block" after last phi of "bNext". Statement* bNextLastPhi = nullptr; if (bNextNonPhi1 != nullptr) { bNextLastPhi = bNextNonPhi1->GetPrevStmt(); } else { bNextLastPhi = bNextFirst->GetPrevStmt(); } bNextLastPhi->SetNextStmt(blkNonPhi1); if (blkNonPhi1 != nullptr) { blkNonPhi1->SetPrevStmt(bNextLastPhi); } else { // block has no non phis, so make the last statement be the last added phi. blkFirst->SetPrevStmt(bNextLastPhi); } // Now update the bbStmtList of "bNext". bNext->bbStmtList = bNextNonPhi1; if (bNextNonPhi1 != nullptr) { bNextNonPhi1->SetPrevStmt(bNextLast); } } else { if (blkFirst != nullptr) // If "block" has no statements, fusion will work fine... { // First, bNextPhis at start of block. Statement* blkLast = blkFirst->GetPrevStmt(); block->bbStmtList = bNextFirst; // Now, rest of "block" (if it exists) after last phi of "bNext". Statement* bNextLastPhi = nullptr; if (bNextNonPhi1 != nullptr) { // There is a first non phi, so the last phi is before it. bNextLastPhi = bNextNonPhi1->GetPrevStmt(); } else { // All the statements are phi defns, so the last one is the prev of the first. bNextLastPhi = bNextFirst->GetPrevStmt(); } bNextFirst->SetPrevStmt(blkLast); bNextLastPhi->SetNextStmt(blkFirst); blkFirst->SetPrevStmt(bNextLastPhi); // Now update the bbStmtList of "bNext" bNext->bbStmtList = bNextNonPhi1; if (bNextNonPhi1 != nullptr) { bNextNonPhi1->SetPrevStmt(bNextLast); } } } } // Now proceed with the updated bbTreeLists. Statement* stmtList1 = block->firstStmt(); Statement* stmtList2 = bNext->firstStmt(); /* the block may have an empty list */ if (stmtList1 != nullptr) { Statement* stmtLast1 = block->lastStmt(); /* The second block may be a GOTO statement or something with an empty bbStmtList */ if (stmtList2 != nullptr) { Statement* stmtLast2 = bNext->lastStmt(); /* append list2 to list 1 */ stmtLast1->SetNextStmt(stmtList2); stmtList2->SetPrevStmt(stmtLast1); stmtList1->SetPrevStmt(stmtLast2); } } else { /* block was formerly empty and now has bNext's statements */ block->bbStmtList = stmtList2; } } // If either block or bNext has a profile weight // or if both block and bNext have non-zero weights // then we will use the max weight for the block. // const bool hasProfileWeight = block->hasProfileWeight() || bNext->hasProfileWeight(); const bool hasNonZeroWeight = (block->bbWeight > BB_ZERO_WEIGHT) || (bNext->bbWeight > BB_ZERO_WEIGHT); if (hasProfileWeight || hasNonZeroWeight) { weight_t const newWeight = max(block->bbWeight, bNext->bbWeight); if (hasProfileWeight) { block->setBBProfileWeight(newWeight); } else { assert(newWeight != BB_ZERO_WEIGHT); block->bbWeight = newWeight; block->bbFlags &= ~BBF_RUN_RARELY; } } // otherwise if either block has a zero weight we select the zero weight else { noway_assert((block->bbWeight == BB_ZERO_WEIGHT) || (bNext->bbWeight == BB_ZERO_WEIGHT)); block->bbWeight = BB_ZERO_WEIGHT; block->bbFlags |= BBF_RUN_RARELY; // Set the RarelyRun flag } /* set the right links */ block->bbJumpKind = bNext->bbJumpKind; VarSetOps::AssignAllowUninitRhs(this, block->bbLiveOut, bNext->bbLiveOut); // Update the beginning and ending IL offsets (bbCodeOffs and bbCodeOffsEnd). // Set the beginning IL offset to the minimum, and the ending offset to the maximum, of the respective blocks. 
// If one block has an unknown offset, we take the other block. // We are merging into 'block', so if its values are correct, just leave them alone. // TODO: we should probably base this on the statements within. if (block->bbCodeOffs == BAD_IL_OFFSET) { block->bbCodeOffs = bNext->bbCodeOffs; // If they are both BAD_IL_OFFSET, this doesn't change anything. } else if (bNext->bbCodeOffs != BAD_IL_OFFSET) { // The are both valid offsets; compare them. if (block->bbCodeOffs > bNext->bbCodeOffs) { block->bbCodeOffs = bNext->bbCodeOffs; } } if (block->bbCodeOffsEnd == BAD_IL_OFFSET) { block->bbCodeOffsEnd = bNext->bbCodeOffsEnd; // If they are both BAD_IL_OFFSET, this doesn't change anything. } else if (bNext->bbCodeOffsEnd != BAD_IL_OFFSET) { // The are both valid offsets; compare them. if (block->bbCodeOffsEnd < bNext->bbCodeOffsEnd) { block->bbCodeOffsEnd = bNext->bbCodeOffsEnd; } } if (((block->bbFlags & BBF_INTERNAL) != 0) && ((bNext->bbFlags & BBF_INTERNAL) == 0)) { // If 'block' is an internal block and 'bNext' isn't, then adjust the flags set on 'block'. block->bbFlags &= ~BBF_INTERNAL; // Clear the BBF_INTERNAL flag block->bbFlags |= BBF_IMPORTED; // Set the BBF_IMPORTED flag } /* Update the flags for block with those found in bNext */ block->bbFlags |= (bNext->bbFlags & BBF_COMPACT_UPD); /* mark bNext as removed */ bNext->bbFlags |= BBF_REMOVED; /* Unlink bNext and update all the marker pointers if necessary */ fgUnlinkRange(block->bbNext, bNext); // If bNext was the last block of a try or handler, update the EH table. ehUpdateForDeletedBlock(bNext); /* Set the jump targets */ switch (bNext->bbJumpKind) { case BBJ_CALLFINALLY: // Propagate RETLESS property block->bbFlags |= (bNext->bbFlags & BBF_RETLESS_CALL); FALLTHROUGH; case BBJ_COND: case BBJ_ALWAYS: case BBJ_EHCATCHRET: block->bbJumpDest = bNext->bbJumpDest; /* Update the predecessor list for 'bNext->bbJumpDest' */ fgReplacePred(bNext->bbJumpDest, bNext, block); /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */ if (bNext->bbJumpKind == BBJ_COND && bNext->bbJumpDest != bNext->bbNext) { fgReplacePred(bNext->bbNext, bNext, block); } break; case BBJ_NONE: /* Update the predecessor list for 'bNext->bbNext' */ fgReplacePred(bNext->bbNext, bNext, block); break; case BBJ_EHFILTERRET: fgReplacePred(bNext->bbJumpDest, bNext, block); break; case BBJ_EHFINALLYRET: { unsigned hndIndex = block->getHndIndex(); EHblkDsc* ehDsc = ehGetDsc(hndIndex); if (ehDsc->HasFinallyHandler()) // No need to do this for fault handlers { BasicBlock* begBlk; BasicBlock* endBlk; ehGetCallFinallyBlockRange(hndIndex, &begBlk, &endBlk); BasicBlock* finBeg = ehDsc->ebdHndBeg; for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) { continue; } noway_assert(bcall->isBBCallAlwaysPair()); fgReplacePred(bcall->bbNext, bNext, block); } } } break; case BBJ_THROW: case BBJ_RETURN: /* no jumps or fall through blocks to set here */ break; case BBJ_SWITCH: block->bbJumpSwt = bNext->bbJumpSwt; // We are moving the switch jump from bNext to block. 
Examine the jump targets // of the BBJ_SWITCH at bNext and replace the predecessor to 'bNext' with ones to 'block' fgChangeSwitchBlock(bNext, block); break; default: noway_assert(!"Unexpected bbJumpKind"); break; } if (bNext->isLoopAlign()) { block->bbFlags |= BBF_LOOP_ALIGN; JITDUMP("Propagating LOOP_ALIGN flag from " FMT_BB " to " FMT_BB " during compacting.\n", bNext->bbNum, block->bbNum); } // If we're collapsing a block created after the dominators are // computed, copy block number the block and reuse dominator // information from bNext to block. // // Note we have to do this renumbering after the full set of pred list // updates above, since those updates rely on stable bbNums; if we renumber // before the updates, we can create pred lists with duplicate m_block->bbNum // values (though different m_blocks). // if (fgDomsComputed && (block->bbNum > fgDomBBcount)) { BlockSetOps::Assign(this, block->bbReach, bNext->bbReach); BlockSetOps::ClearD(this, bNext->bbReach); block->bbIDom = bNext->bbIDom; bNext->bbIDom = nullptr; // In this case, there's no need to update the preorder and postorder numbering // since we're changing the bbNum, this makes the basic block all set. // JITDUMP("Renumbering " FMT_BB " to be " FMT_BB " to preserve dominator information\n", block->bbNum, bNext->bbNum); block->bbNum = bNext->bbNum; // Because we may have reordered pred lists when we swapped in // block for bNext above, we now need to re-reorder pred lists // to reflect the bbNum update. // // This process of reordering and re-reordering could likely be avoided // via a different update strategy. But because it's probably rare, // and we avoid most of the work if pred lists are already in order, // we'll just ensure everything is properly ordered. // for (BasicBlock* const checkBlock : Blocks()) { checkBlock->ensurePredListOrder(this); } } fgUpdateLoopsAfterCompacting(block, bNext); #if DEBUG if (verbose && 0) { printf("\nAfter compacting:\n"); fgDispBasicBlocks(false); } #endif #if DEBUG if (JitConfig.JitSlowDebugChecksEnabled() != 0) { // Make sure that the predecessor lists are accurate fgDebugCheckBBlist(); } #endif // DEBUG } //------------------------------------------------------------- // fgUpdateLoopsAfterCompacting: Update the loop table after block compaction. // // Arguments: // block - target of compaction. // bNext - bbNext of `block`. This block has been removed. // void Compiler::fgUpdateLoopsAfterCompacting(BasicBlock* block, BasicBlock* bNext) { /* Check if the removed block is not part the loop table */ noway_assert(bNext); for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++) { /* Some loops may have been already removed by * loop unrolling or conditional folding */ if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED) { continue; } /* Check the loop head (i.e. 
the block preceding the loop) */ if (optLoopTable[loopNum].lpHead == bNext) { optLoopTable[loopNum].lpHead = block; } /* Check the loop bottom */ if (optLoopTable[loopNum].lpBottom == bNext) { optLoopTable[loopNum].lpBottom = block; } /* Check the loop exit */ if (optLoopTable[loopNum].lpExit == bNext) { noway_assert(optLoopTable[loopNum].lpExitCnt == 1); optLoopTable[loopNum].lpExit = block; } /* Check the loop entry */ if (optLoopTable[loopNum].lpEntry == bNext) { optLoopTable[loopNum].lpEntry = block; } /* Check the loop top */ if (optLoopTable[loopNum].lpTop == bNext) { optLoopTable[loopNum].lpTop = block; } } } //------------------------------------------------------------- // fgUnreachableBlock: Remove a block when it is unreachable. // // This function cannot remove the first block. // // Arguments: // block - unreachable block to remove // void Compiler::fgUnreachableBlock(BasicBlock* block) { // genReturnBB should never be removed, as we might have special hookups there. // Therefore, we should never come here to remove the statements in the genReturnBB block. // For example, the profiler hookup needs to have the "void GT_RETURN" statement // to properly set the info.compProfilerCallback flag. noway_assert(block != genReturnBB); if (block->bbFlags & BBF_REMOVED) { return; } #ifdef DEBUG if (verbose) { printf("\nRemoving unreachable " FMT_BB "\n", block->bbNum); } #endif // DEBUG noway_assert(block->bbPrev != nullptr); // Can't use this function to remove the first block #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) assert(!block->isBBCallAlwaysPairTail()); // can't remove the BBJ_ALWAYS of a BBJ_CALLFINALLY / BBJ_ALWAYS pair #endif // First, delete all the code in the block. if (block->IsLIR()) { LIR::Range& blockRange = LIR::AsRange(block); if (!blockRange.IsEmpty()) { blockRange.Delete(this, block, blockRange.FirstNode(), blockRange.LastNode()); } } else { // TODO-Cleanup: I'm not sure why this happens -- if the block is unreachable, why does it have phis? // Anyway, remove any phis. Statement* firstNonPhi = block->FirstNonPhiDef(); if (block->bbStmtList != firstNonPhi) { if (firstNonPhi != nullptr) { firstNonPhi->SetPrevStmt(block->lastStmt()); } block->bbStmtList = firstNonPhi; } for (Statement* const stmt : block->Statements()) { fgRemoveStmt(block, stmt); } noway_assert(block->bbStmtList == nullptr); } // Next update the loop table and bbWeights optUpdateLoopsBeforeRemoveBlock(block); // Mark the block as removed block->bbFlags |= BBF_REMOVED; // Update bbRefs and bbPreds for the blocks reached by this block fgRemoveBlockAsPred(block); } //------------------------------------------------------------- // fgRemoveConditionalJump: Remove or morph a jump when we jump to the same // block when both the condition is true or false. Remove the branch condition, // but leave any required side effects. // // Arguments: // block - block with conditional branch // void Compiler::fgRemoveConditionalJump(BasicBlock* block) { noway_assert(block->bbJumpKind == BBJ_COND && block->bbJumpDest == block->bbNext); assert(compRationalIRForm == block->IsLIR()); flowList* flow = fgGetPredForBlock(block->bbNext, block); noway_assert(flow->flDupCount == 2); // Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount. 
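    // (A BBJ_COND whose jump target is also its fall-through successor contributes two
    // edges to that successor; the pred list records them as a single entry with a dup
    // count of 2. Removing the branch leaves only the fall-through edge, so both the
    // successor's bbRefs and the pred entry's dup count are decremented below.)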
block->bbJumpKind = BBJ_NONE; --block->bbNext->bbRefs; --flow->flDupCount; #ifdef DEBUG block->bbJumpDest = nullptr; if (verbose) { printf("Block " FMT_BB " becoming a BBJ_NONE to " FMT_BB " (jump target is the same whether the condition" " is true or false)\n", block->bbNum, block->bbNext->bbNum); } #endif // Remove the block jump condition if (block->IsLIR()) { LIR::Range& blockRange = LIR::AsRange(block); GenTree* test = blockRange.LastNode(); assert(test->OperIsConditionalJump()); bool isClosed; unsigned sideEffects; LIR::ReadOnlyRange testRange = blockRange.GetTreeRange(test, &isClosed, &sideEffects); // TODO-LIR: this should really be checking GTF_ALL_EFFECT, but that produces unacceptable // diffs compared to the existing backend. if (isClosed && ((sideEffects & GTF_SIDE_EFFECT) == 0)) { // If the jump and its operands form a contiguous, side-effect-free range, // remove them. blockRange.Delete(this, block, std::move(testRange)); } else { // Otherwise, just remove the jump node itself. blockRange.Remove(test, true); } } else { Statement* test = block->lastStmt(); GenTree* tree = test->GetRootNode(); noway_assert(tree->gtOper == GT_JTRUE); GenTree* sideEffList = nullptr; if (tree->gtFlags & GTF_SIDE_EFFECT) { gtExtractSideEffList(tree, &sideEffList); if (sideEffList) { noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT); #ifdef DEBUG if (verbose) { printf("Extracted side effects list from condition...\n"); gtDispTree(sideEffList); printf("\n"); } #endif } } // Delete the cond test or replace it with the side effect tree if (sideEffList == nullptr) { fgRemoveStmt(block, test); } else { test->SetRootNode(sideEffList); if (fgStmtListThreaded) { gtSetStmtInfo(test); fgSetStmtSeq(test); } } } } //------------------------------------------------------------- // fgOptimizeBranchToEmptyUnconditional: // Optimize a jump to an empty block which ends in an unconditional branch. // // Arguments: // block - source block // bDest - destination // // Returns: true if changes were made // bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBlock* bDest) { bool optimizeJump = true; assert(bDest->isEmpty()); assert(bDest->bbJumpKind == BBJ_ALWAYS); // We do not optimize jumps between two different try regions. // However jumping to a block that is not in any try region is OK // if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) { optimizeJump = false; } // Don't optimize a jump to a removed block if (bDest->bbJumpDest->bbFlags & BBF_REMOVED) { optimizeJump = false; } // Don't optimize a jump to a cloned finally if (bDest->bbFlags & BBF_CLONED_FINALLY_BEGIN) { optimizeJump = false; } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't optimize a jump to a finally target. For BB1->BB2->BB3, where // BB2 is a finally target, if we changed BB1 to jump directly to BB3, // it would skip the finally target. BB1 might be a BBJ_ALWAYS block part // of a BBJ_CALLFINALLY/BBJ_ALWAYS pair, so changing the finally target // would change the unwind behavior. 
if (bDest->bbFlags & BBF_FINALLY_TARGET) { optimizeJump = false; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Must optimize jump if bDest has been removed // if (bDest->bbFlags & BBF_REMOVED) { optimizeJump = true; } // If we are optimizing using real profile weights // then don't optimize a conditional jump to an unconditional jump // until after we have computed the edge weights // if (fgIsUsingProfileWeights() && !fgEdgeWeightsComputed) { fgNeedsUpdateFlowGraph = true; optimizeJump = false; } if (optimizeJump) { #ifdef DEBUG if (verbose) { printf("\nOptimizing a jump to an unconditional jump (" FMT_BB " -> " FMT_BB " -> " FMT_BB ")\n", block->bbNum, bDest->bbNum, bDest->bbJumpDest->bbNum); } #endif // DEBUG // // When we optimize a branch to branch we need to update the profile weight // of bDest by subtracting out the block/edge weight of the path that is being optimized. // if (fgHaveValidEdgeWeights && bDest->hasProfileWeight()) { flowList* edge1 = fgGetPredForBlock(bDest, block); noway_assert(edge1 != nullptr); weight_t edgeWeight; if (edge1->edgeWeightMin() != edge1->edgeWeightMax()) { // // We only have an estimate for the edge weight // edgeWeight = (edge1->edgeWeightMin() + edge1->edgeWeightMax()) / 2; // // Clear the profile weight flag // bDest->bbFlags &= ~BBF_PROF_WEIGHT; } else { // // We only have the exact edge weight // edgeWeight = edge1->edgeWeightMin(); } // // Update the bDest->bbWeight // if (bDest->bbWeight > edgeWeight) { bDest->bbWeight -= edgeWeight; } else { bDest->bbWeight = BB_ZERO_WEIGHT; bDest->bbFlags |= BBF_RUN_RARELY; // Set the RarelyRun flag } flowList* edge2 = fgGetPredForBlock(bDest->bbJumpDest, bDest); if (edge2 != nullptr) { // // Update the edge2 min/max weights // weight_t newEdge2Min; weight_t newEdge2Max; if (edge2->edgeWeightMin() > edge1->edgeWeightMin()) { newEdge2Min = edge2->edgeWeightMin() - edge1->edgeWeightMin(); } else { newEdge2Min = BB_ZERO_WEIGHT; } if (edge2->edgeWeightMax() > edge1->edgeWeightMin()) { newEdge2Max = edge2->edgeWeightMax() - edge1->edgeWeightMin(); } else { newEdge2Max = BB_ZERO_WEIGHT; } edge2->setEdgeWeights(newEdge2Min, newEdge2Max, bDest); } } // Optimize the JUMP to empty unconditional JUMP to go to the new target block->bbJumpDest = bDest->bbJumpDest; fgAddRefPred(bDest->bbJumpDest, block, fgRemoveRefPred(bDest, block)); return true; } return false; } //------------------------------------------------------------- // fgOptimizeEmptyBlock: // Does flow optimization of an empty block (can remove it in some cases) // // Arguments: // block - an empty block // // Returns: true if changes were made // bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { assert(block->isEmpty()); BasicBlock* bPrev = block->bbPrev; switch (block->bbJumpKind) { case BBJ_COND: case BBJ_SWITCH: /* can never happen */ noway_assert(!"Conditional or switch block with empty body!"); break; case BBJ_THROW: case BBJ_CALLFINALLY: case BBJ_RETURN: case BBJ_EHCATCHRET: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: /* leave them as is */ /* some compilers generate multiple returns and put all of them at the end - * to solve that we need the predecessor list */ break; case BBJ_ALWAYS: // A GOTO cannot be to the next block since that // should have been fixed by the optimization above // An exception is made for a jump from Hot to Cold noway_assert(block->bbJumpDest != block->bbNext || block->isBBCallAlwaysPairTail() || fgInDifferentRegions(block, block->bbNext)); /* Cannot remove the first BB */ if (!bPrev) { break; } /* 
Do not remove a block that jumps to itself - used for while (true){} */ if (block->bbJumpDest == block) { break; } /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ if (bPrev->bbJumpKind != BBJ_NONE) { break; } // can't allow fall through into cold code if (block->bbNext == fgFirstColdBlock) { break; } /* Can fall through since this is similar with removing * a BBJ_NONE block, only the successor is different */ FALLTHROUGH; case BBJ_NONE: /* special case if this is the first BB */ if (!bPrev) { assert(block == fgFirstBB); } else { /* If this block follows a BBJ_CALLFINALLY do not remove it * (because we don't know who may jump to it) */ if (bPrev->bbJumpKind == BBJ_CALLFINALLY) { break; } } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) /* Don't remove finally targets */ if (block->bbFlags & BBF_FINALLY_TARGET) break; #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) #if defined(FEATURE_EH_FUNCLETS) /* Don't remove an empty block that is in a different EH region * from its successor block, if the block is the target of a * catch return. It is required that the return address of a * catch be in the correct EH region, for re-raise of thread * abort exceptions to work. Insert a NOP in the empty block * to ensure we generate code for the block, if we keep it. */ { BasicBlock* succBlock; if (block->bbJumpKind == BBJ_ALWAYS) { succBlock = block->bbJumpDest; } else { succBlock = block->bbNext; } if ((succBlock != nullptr) && !BasicBlock::sameEHRegion(block, succBlock)) { // The empty block and the block that follows it are in different // EH regions. Is this a case where they can't be merged? bool okToMerge = true; // assume it's ok for (BasicBlock* const predBlock : block->PredBlocks()) { if (predBlock->bbJumpKind == BBJ_EHCATCHRET) { assert(predBlock->bbJumpDest == block); okToMerge = false; // we can't get rid of the empty block break; } } if (!okToMerge) { // Insert a NOP in the empty block to ensure we generate code // for the catchret target in the right EH region. GenTree* nop = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); if (block->IsLIR()) { LIR::AsRange(block).InsertAtEnd(nop); LIR::ReadOnlyRange range(nop, nop); m_pLowering->LowerRange(block, range); } else { Statement* nopStmt = fgNewStmtAtEnd(block, nop); fgSetStmtSeq(nopStmt); gtSetStmtInfo(nopStmt); } #ifdef DEBUG if (verbose) { printf("\nKeeping empty block " FMT_BB " - it is the target of a catch return\n", block->bbNum); } #endif // DEBUG break; // go to the next block } } } #endif // FEATURE_EH_FUNCLETS if (!ehCanDeleteEmptyBlock(block)) { // We're not allowed to remove this block due to reasons related to the EH table. break; } /* special case if this is the last BB */ if (block == fgLastBB) { if (!bPrev) { break; } fgLastBB = bPrev; } // When using profile weights, fgComputeEdgeWeights expects the first non-internal block to have profile // weight. // Make sure we don't break that invariant. if (fgIsUsingProfileWeights() && block->hasProfileWeight() && (block->bbFlags & BBF_INTERNAL) == 0) { BasicBlock* bNext = block->bbNext; // Check if the next block can't maintain the invariant. if ((bNext == nullptr) || ((bNext->bbFlags & BBF_INTERNAL) != 0) || !bNext->hasProfileWeight()) { // Check if the current block is the first non-internal block. BasicBlock* curBB = bPrev; while ((curBB != nullptr) && (curBB->bbFlags & BBF_INTERNAL) != 0) { curBB = curBB->bbPrev; } if (curBB == nullptr) { // This block is the first non-internal block and it has profile weight. // Don't delete it. 
break; } } } /* Remove the block */ compCurBB = block; fgRemoveBlock(block, /* unreachable */ false); return true; default: noway_assert(!"Unexpected bbJumpKind"); break; } return false; } //------------------------------------------------------------- // fgOptimizeSwitchBranches: // Does flow optimization for a switch - bypasses jumps to empty unconditional branches, // and transforms degenerate switch cases like those with 1 or 2 targets. // // Arguments: // block - block with switch // // Returns: true if changes were made // bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) { assert(block->bbJumpKind == BBJ_SWITCH); unsigned jmpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jmpTab = block->bbJumpSwt->bbsDstTab; BasicBlock* bNewDest; // the new jump target for the current switch case BasicBlock* bDest; bool returnvalue = false; do { REPEAT_SWITCH:; bDest = *jmpTab; bNewDest = bDest; // Do we have a JUMP to an empty unconditional JUMP block? if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { bool optimizeJump = true; // We do not optimize jumps between two different try regions. // However jumping to a block that is not in any try region is OK // if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) { optimizeJump = false; } // If we are optimize using real profile weights // then don't optimize a switch jump to an unconditional jump // until after we have computed the edge weights // if (fgIsUsingProfileWeights() && !fgEdgeWeightsComputed) { fgNeedsUpdateFlowGraph = true; optimizeJump = false; } if (optimizeJump) { bNewDest = bDest->bbJumpDest; #ifdef DEBUG if (verbose) { printf("\nOptimizing a switch jump to an empty block with an unconditional jump (" FMT_BB " -> " FMT_BB " -> " FMT_BB ")\n", block->bbNum, bDest->bbNum, bNewDest->bbNum); } #endif // DEBUG } } if (bNewDest != bDest) { // // When we optimize a branch to branch we need to update the profile weight // of bDest by subtracting out the block/edge weight of the path that is being optimized. // if (fgIsUsingProfileWeights() && bDest->hasProfileWeight()) { if (fgHaveValidEdgeWeights) { flowList* edge = fgGetPredForBlock(bDest, block); weight_t branchThroughWeight = edge->edgeWeightMin(); if (bDest->bbWeight > branchThroughWeight) { bDest->bbWeight -= branchThroughWeight; } else { bDest->bbWeight = BB_ZERO_WEIGHT; bDest->bbFlags |= BBF_RUN_RARELY; } } } // Update the switch jump table *jmpTab = bNewDest; // Maintain, if necessary, the set of unique targets of "block." UpdateSwitchTableTarget(block, bDest, bNewDest); fgAddRefPred(bNewDest, block, fgRemoveRefPred(bDest, block)); // we optimized a Switch label - goto REPEAT_SWITCH to follow this new jump returnvalue = true; goto REPEAT_SWITCH; } } while (++jmpTab, --jmpCnt); Statement* switchStmt = nullptr; LIR::Range* blockRange = nullptr; GenTree* switchTree; if (block->IsLIR()) { blockRange = &LIR::AsRange(block); switchTree = blockRange->LastNode(); assert(switchTree->OperGet() == GT_SWITCH_TABLE); } else { switchStmt = block->lastStmt(); switchTree = switchStmt->GetRootNode(); assert(switchTree->OperGet() == GT_SWITCH); } noway_assert(switchTree->gtType == TYP_VOID); // At this point all of the case jump targets have been updated such // that none of them go to block that is an empty unconditional block // jmpTab = block->bbJumpSwt->bbsDstTab; jmpCnt = block->bbJumpSwt->bbsCount; // Now check for two trivial switch jumps. 
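    // (The two degenerate shapes handled below are: a switch with a single unique
    // successor, which becomes a BBJ_ALWAYS, and a switch with exactly two targets
    // whose default case is the next block, which becomes a BBJ_COND on
    // "switchVal == 0".)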
// if (block->NumSucc(this) == 1) { // Use BBJ_ALWAYS for a switch with only a default clause, or with only one unique successor. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("\nRemoving a switch jump with a single target (" FMT_BB ")\n", block->bbNum); printf("BEFORE:\n"); } #endif // DEBUG if (block->IsLIR()) { bool isClosed; unsigned sideEffects; LIR::ReadOnlyRange switchTreeRange = blockRange->GetTreeRange(switchTree, &isClosed, &sideEffects); // The switch tree should form a contiguous, side-effect free range by construction. See // Lowering::LowerSwitch for details. assert(isClosed); assert((sideEffects & GTF_ALL_EFFECT) == 0); blockRange->Delete(this, block, std::move(switchTreeRange)); } else { /* check for SIDE_EFFECTS */ if (switchTree->gtFlags & GTF_SIDE_EFFECT) { /* Extract the side effects from the conditional */ GenTree* sideEffList = nullptr; gtExtractSideEffList(switchTree, &sideEffList); if (sideEffList == nullptr) { goto NO_SWITCH_SIDE_EFFECT; } noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT); #ifdef DEBUG if (verbose) { printf("\nSwitch expression has side effects! Extracting side effects...\n"); gtDispTree(switchTree); printf("\n"); gtDispTree(sideEffList); printf("\n"); } #endif // DEBUG /* Replace the conditional statement with the list of side effects */ noway_assert(sideEffList->gtOper != GT_SWITCH); switchStmt->SetRootNode(sideEffList); if (fgStmtListThreaded) { compCurBB = block; /* Update ordering, costs, FP levels, etc. */ gtSetStmtInfo(switchStmt); /* Re-link the nodes for this statement */ fgSetStmtSeq(switchStmt); } } else { NO_SWITCH_SIDE_EFFECT: /* conditional has NO side effect - remove it */ fgRemoveStmt(block, switchStmt); } } // Change the switch jump into a BBJ_ALWAYS block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; block->bbJumpKind = BBJ_ALWAYS; if (jmpCnt > 1) { for (unsigned i = 1; i < jmpCnt; ++i) { (void)fgRemoveRefPred(jmpTab[i], block); } } return true; } else if (block->bbJumpSwt->bbsCount == 2 && block->bbJumpSwt->bbsDstTab[1] == block->bbNext) { /* Use a BBJ_COND(switchVal==0) for a switch with only one significant clause besides the default clause, if the default clause is bbNext */ GenTree* switchVal = switchTree->AsOp()->gtOp1; noway_assert(genActualTypeIsIntOrI(switchVal->TypeGet())); // If we are in LIR, remove the jump table from the block. if (block->IsLIR()) { GenTree* jumpTable = switchTree->AsOp()->gtOp2; assert(jumpTable->OperGet() == GT_JMPTABLE); blockRange->Remove(jumpTable); } // Change the GT_SWITCH(switchVal) into GT_JTRUE(GT_EQ(switchVal==0)). // Also mark the node as GTF_DONT_CSE as further down JIT is not capable of handling it. // For example CSE could determine that the expression rooted at GT_EQ is a candidate cse and // replace it with a COMMA node. In such a case we will end up with GT_JTRUE node pointing to // a COMMA node which results in noway asserts in fgMorphSmpOp(), optAssertionGen() and rpPredictTreeRegUse(). // For the same reason fgMorphSmpOp() marks GT_JTRUE nodes with RELOP children as GTF_DONT_CSE. 
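        // (In effect, the rewrite below is:
        //
        //     GT_SWITCH(switchVal), bbsDstTab = { case0, default }
        //       ==>
        //     GT_JTRUE(GT_EQ(switchVal, 0)) with bbJumpDest = case0,
        //     falling through to the default target, which is already the next block.)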
CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("\nConverting a switch (" FMT_BB ") with only one significant clause besides a default target to a " "conditional branch\n", block->bbNum); } #endif // DEBUG switchTree->ChangeOper(GT_JTRUE); GenTree* zeroConstNode = gtNewZeroConNode(genActualType(switchVal->TypeGet())); GenTree* condNode = gtNewOperNode(GT_EQ, TYP_INT, switchVal, zeroConstNode); switchTree->AsOp()->gtOp1 = condNode; switchTree->AsOp()->gtOp1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE); if (block->IsLIR()) { blockRange->InsertAfter(switchVal, zeroConstNode, condNode); LIR::ReadOnlyRange range(zeroConstNode, switchTree); m_pLowering->LowerRange(block, range); } else if (fgStmtListThreaded) { gtSetStmtInfo(switchStmt); fgSetStmtSeq(switchStmt); } block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; block->bbJumpKind = BBJ_COND; return true; } return returnvalue; } //------------------------------------------------------------- // fgBlockEndFavorsTailDuplication: // Heuristic function that returns true if this block ends in a statement that looks favorable // for tail-duplicating its successor (such as assigning a constant to a local). // // Arguments: // block: BasicBlock we are considering duplicating the successor of // lclNum: local that is used by the successor block, provided by // prior call to fgBlockIsGoodTailDuplicationCandidate // // Returns: // true if block end is favorable for tail duplication // // Notes: // This is the second half of the evaluation for tail duplication, where we try // to determine if this predecessor block assigns a constant or provides useful // information about a local that is tested in an unconditionally executed successor. // If so then duplicating the successor will likely allow the test to be // optimized away. // bool Compiler::fgBlockEndFavorsTailDuplication(BasicBlock* block, unsigned lclNum) { if (block->isRunRarely()) { return false; } // If the local is address exposed, we currently can't optimize. // LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (lclDsc->IsAddressExposed()) { return false; } Statement* const lastStmt = block->lastStmt(); Statement* const firstStmt = block->FirstNonPhiDef(); if (lastStmt == nullptr) { return false; } // Tail duplication tends to pay off when the last statement // is an assignment of a constant, arraylength, or a relop. // This is because these statements produce information about values // that would otherwise be lost at the upcoming merge point. // // Check up to N statements... // const int limit = 2; int count = 0; Statement* stmt = lastStmt; while (count < limit) { count++; GenTree* const tree = stmt->GetRootNode(); if (tree->OperIs(GT_ASG) && !tree->OperIsBlkOp()) { GenTree* const op1 = tree->AsOp()->gtOp1; if (op1->IsLocal()) { const unsigned op1LclNum = op1->AsLclVarCommon()->GetLclNum(); if (op1LclNum == lclNum) { GenTree* const op2 = tree->AsOp()->gtOp2; if (op2->OperIs(GT_ARR_LENGTH) || op2->OperIsConst() || op2->OperIsCompare()) { return true; } } } } Statement* const prevStmt = stmt->GetPrevStmt(); // The statement list prev links wrap from first->last, so exit // when we see lastStmt again, as we've now seen all statements. // if (prevStmt == lastStmt) { break; } stmt = prevStmt; } return false; } //------------------------------------------------------------- // fgBlockIsGoodTailDuplicationCandidate: // Heuristic function that examines a block (presumably one that is a merge point) to determine // if it is a good candidate to be duplicated. 
// // Arguments: // target - the tail block (candidate for duplication) // // Returns: // true if this is a good candidate, false otherwise // if true, lclNum is set to lcl to scan for in predecessor block // // Notes: // The current heuristic is that tail duplication is deemed favorable if this // block simply tests the value of a local against a constant or some other local. // // This is the first half of the evaluation for tail duplication. We subsequently // need to check if predecessors of this block assigns a constant to the local. // bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock* target, unsigned* lclNum) { *lclNum = BAD_VAR_NUM; // Here we are looking for small blocks where a local live-into the block // ultimately feeds a simple conditional branch. // // These blocks are small, and when duplicated onto the tail of blocks that end in // assignments, there is a high probability of the branch completely going away. // // This is by no means the only kind of tail that it is beneficial to duplicate, // just the only one we recognize for now. if (target->bbJumpKind != BBJ_COND) { return false; } // No point duplicating this block if it's not a control flow join. if (target->bbRefs < 2) { return false; } Statement* const lastStmt = target->lastStmt(); Statement* const firstStmt = target->FirstNonPhiDef(); // We currently allow just one statement aside from the branch. // if ((firstStmt != lastStmt) && (firstStmt != lastStmt->GetPrevStmt())) { return false; } // Verify the branch is just a simple local compare. // GenTree* const lastTree = lastStmt->GetRootNode(); if (lastTree->gtOper != GT_JTRUE) { return false; } // must be some kind of relational operator GenTree* const cond = lastTree->AsOp()->gtOp1; if (!cond->OperIsCompare()) { return false; } // op1 must be some combinations of casts of local or constant GenTree* op1 = cond->AsOp()->gtOp1; while (op1->gtOper == GT_CAST) { op1 = op1->AsOp()->gtOp1; } if (!op1->IsLocal() && !op1->OperIsConst()) { return false; } // op2 must be some combinations of casts of local or constant GenTree* op2 = cond->AsOp()->gtOp2; while (op2->gtOper == GT_CAST) { op2 = op2->AsOp()->gtOp1; } if (!op2->IsLocal() && !op2->OperIsConst()) { return false; } // Tree must have one constant and one local, or be comparing // the same local to itself. unsigned lcl1 = BAD_VAR_NUM; unsigned lcl2 = BAD_VAR_NUM; if (op1->IsLocal()) { lcl1 = op1->AsLclVarCommon()->GetLclNum(); } if (op2->IsLocal()) { lcl2 = op2->AsLclVarCommon()->GetLclNum(); } if ((lcl1 != BAD_VAR_NUM) && op2->OperIsConst()) { *lclNum = lcl1; } else if ((lcl2 != BAD_VAR_NUM) && op1->OperIsConst()) { *lclNum = lcl2; } else if ((lcl1 != BAD_VAR_NUM) && (lcl1 == lcl2)) { *lclNum = lcl1; } else { return false; } // If there's no second statement, we're good. // if (firstStmt == lastStmt) { return true; } // Otherwise check the first stmt. // Verify the branch is just a simple local compare. // GenTree* const firstTree = firstStmt->GetRootNode(); if (firstTree->gtOper != GT_ASG) { return false; } GenTree* const lhs = firstTree->AsOp()->gtOp1; if (!lhs->OperIs(GT_LCL_VAR)) { return false; } const unsigned lhsLcl = lhs->AsLclVarCommon()->GetLclNum(); if (lhsLcl != *lclNum) { return false; } // Could allow unary here too... 
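    // (Roughly, the extra statement accepted here has the shape
    //
    //     lclNum = <binop>(<casts of local-or-const>, <casts of local-or-const>)
    //
    //  mirroring the checks already done on the branch condition above.)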
// GenTree* const rhs = firstTree->AsOp()->gtOp2; if (!rhs->OperIsBinary()) { return false; } // op1 must be some combinations of casts of local or constant // (or unary) op1 = rhs->AsOp()->gtOp1; while (op1->gtOper == GT_CAST) { op1 = op1->AsOp()->gtOp1; } if (!op1->IsLocal() && !op1->OperIsConst()) { return false; } // op2 must be some combinations of casts of local or constant // (or unary) op2 = rhs->AsOp()->gtOp2; // A binop may not actually have an op2. // if (op2 == nullptr) { return false; } while (op2->gtOper == GT_CAST) { op2 = op2->AsOp()->gtOp1; } if (!op2->IsLocal() && !op2->OperIsConst()) { return false; } // Tree must have one constant and one local, or be comparing // the same local to itself. lcl1 = BAD_VAR_NUM; lcl2 = BAD_VAR_NUM; if (op1->IsLocal()) { lcl1 = op1->AsLclVarCommon()->GetLclNum(); } if (op2->IsLocal()) { lcl2 = op2->AsLclVarCommon()->GetLclNum(); } if ((lcl1 != BAD_VAR_NUM) && op2->OperIsConst()) { *lclNum = lcl1; } else if ((lcl2 != BAD_VAR_NUM) && op1->OperIsConst()) { *lclNum = lcl2; } else if ((lcl1 != BAD_VAR_NUM) && (lcl1 == lcl2)) { *lclNum = lcl1; } else { return false; } return true; } //------------------------------------------------------------- // fgOptimizeUncondBranchToSimpleCond: // For a block which has an unconditional branch, look to see if its target block // is a good candidate for tail duplication, and if so do that duplication. // // Arguments: // block - block with uncond branch // target - block which is target of first block // // Returns: true if changes were made // // Notes: // This optimization generally reduces code size and path length. // bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* target) { JITDUMP("Considering uncond to cond " FMT_BB " -> " FMT_BB "\n", block->bbNum, target->bbNum); if (!BasicBlock::sameEHRegion(block, target)) { return false; } if (fgBBisScratch(block)) { return false; } unsigned lclNum = BAD_VAR_NUM; // First check if the successor tests a local and then branches on the result // of a test, and obtain the local if so. // if (!fgBlockIsGoodTailDuplicationCandidate(target, &lclNum)) { return false; } // At this point we know target is BBJ_COND. // // Bail out if OSR, as we can have unusual flow into loops. If one // of target's successors is also a backedge target, this optimization // may mess up loop recognition by creating too many non-loop preds. // if (opts.IsOSR()) { assert(target->bbJumpKind == BBJ_COND); if ((target->bbNext->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) { JITDUMP("Deferring: " FMT_BB " --> " FMT_BB "; latter looks like loop top\n", target->bbNum, target->bbNext->bbNum); return false; } if ((target->bbJumpDest->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) { JITDUMP("Deferring: " FMT_BB " --> " FMT_BB "; latter looks like loop top\n", target->bbNum, target->bbJumpDest->bbNum); return false; } } // See if this block assigns constant or other interesting tree to that same local. // if (!fgBlockEndFavorsTailDuplication(block, lclNum)) { return false; } // NOTE: we do not currently hit this assert because this function is only called when // `fgUpdateFlowGraph` has been called with `doTailDuplication` set to true, and the // backend always calls `fgUpdateFlowGraph` with `doTailDuplication` set to false. 
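    // (After the duplication below, "block" ends with clones of target's statements and
    // becomes a BBJ_COND to target's jump destination, and a new BBJ_ALWAYS block is
    // added to cover target's fall-through successor, so the original unconditional
    // jump to "target" disappears from this path.)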
assert(!block->IsLIR()); // Duplicate the target block at the end of this block // for (Statement* stmt : target->NonPhiStatements()) { GenTree* clone = gtCloneExpr(stmt->GetRootNode()); noway_assert(clone); Statement* cloneStmt = gtNewStmt(clone); if (fgStmtListThreaded) { gtSetStmtInfo(cloneStmt); } fgInsertStmtAtEnd(block, cloneStmt); } // Fix up block's flow // block->bbJumpKind = BBJ_COND; block->bbJumpDest = target->bbJumpDest; fgAddRefPred(block->bbJumpDest, block); fgRemoveRefPred(target, block); // add an unconditional block after this block to jump to the target block's fallthrough block // BasicBlock* next = fgNewBBafter(BBJ_ALWAYS, block, true); // The new block 'next' will inherit its weight from 'block' // next->inheritWeight(block); next->bbJumpDest = target->bbNext; fgAddRefPred(next, block); fgAddRefPred(next->bbJumpDest, next); JITDUMP("fgOptimizeUncondBranchToSimpleCond(from " FMT_BB " to cond " FMT_BB "), created new uncond " FMT_BB "\n", block->bbNum, target->bbNum, next->bbNum); JITDUMP(" expecting opts to key off V%02u in " FMT_BB "\n", lclNum, block->bbNum); return true; } //------------------------------------------------------------- // fgOptimizeBranchToNext: // Optimize a block which has a branch to the following block // // Arguments: // block - block with a branch // bNext - block which is both next and the target of the first block // bPrev - block which is prior to the first block // // Returns: true if changes were made // bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, BasicBlock* bPrev) { assert(block->KindIs(BBJ_COND, BBJ_ALWAYS)); assert(block->bbJumpDest == bNext); assert(block->bbNext == bNext); assert(block->bbPrev == bPrev); if (block->bbJumpKind == BBJ_ALWAYS) { // We can't remove it if it is a branch from hot => cold if (!fgInDifferentRegions(block, bNext)) { // We can't remove if it is marked as BBF_KEEP_BBJ_ALWAYS if (!(block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // We can't remove if the BBJ_ALWAYS is part of a BBJ_CALLFINALLY pair if (!block->isBBCallAlwaysPairTail()) { /* the unconditional jump is to the next BB */ block->bbJumpKind = BBJ_NONE; #ifdef DEBUG if (verbose) { printf("\nRemoving unconditional jump to next block (" FMT_BB " -> " FMT_BB ") (converted " FMT_BB " to " "fall-through)\n", block->bbNum, bNext->bbNum, block->bbNum); } #endif // DEBUG return true; } } } } else { /* remove the conditional statement at the end of block */ noway_assert(block->bbJumpKind == BBJ_COND); noway_assert(block->isValid()); #ifdef DEBUG if (verbose) { printf("\nRemoving conditional jump to next block (" FMT_BB " -> " FMT_BB ")\n", block->bbNum, bNext->bbNum); } #endif // DEBUG if (block->IsLIR()) { LIR::Range& blockRange = LIR::AsRange(block); GenTree* jmp = blockRange.LastNode(); assert(jmp->OperIsConditionalJump()); if (jmp->OperGet() == GT_JTRUE) { jmp->AsOp()->gtOp1->gtFlags &= ~GTF_SET_FLAGS; } bool isClosed; unsigned sideEffects; LIR::ReadOnlyRange jmpRange = blockRange.GetTreeRange(jmp, &isClosed, &sideEffects); // TODO-LIR: this should really be checking GTF_ALL_EFFECT, but that produces unacceptable // diffs compared to the existing backend. if (isClosed && ((sideEffects & GTF_SIDE_EFFECT) == 0)) { // If the jump and its operands form a contiguous, side-effect-free range, // remove them. blockRange.Delete(this, block, std::move(jmpRange)); } else { // Otherwise, just remove the jump node itself. 
blockRange.Remove(jmp, true); } } else { Statement* condStmt = block->lastStmt(); GenTree* cond = condStmt->GetRootNode(); noway_assert(cond->gtOper == GT_JTRUE); /* check for SIDE_EFFECTS */ if (cond->gtFlags & GTF_SIDE_EFFECT) { /* Extract the side effects from the conditional */ GenTree* sideEffList = nullptr; gtExtractSideEffList(cond, &sideEffList); if (sideEffList == nullptr) { compCurBB = block; fgRemoveStmt(block, condStmt); } else { noway_assert(sideEffList->gtFlags & GTF_SIDE_EFFECT); #ifdef DEBUG if (verbose) { printf("\nConditional has side effects! Extracting side effects...\n"); gtDispTree(cond); printf("\n"); gtDispTree(sideEffList); printf("\n"); } #endif // DEBUG /* Replace the conditional statement with the list of side effects */ noway_assert(sideEffList->gtOper != GT_JTRUE); condStmt->SetRootNode(sideEffList); if (fgStmtListThreaded) { compCurBB = block; /* Update ordering, costs, FP levels, etc. */ gtSetStmtInfo(condStmt); /* Re-link the nodes for this statement */ fgSetStmtSeq(condStmt); } } } else { compCurBB = block; /* conditional has NO side effect - remove it */ fgRemoveStmt(block, condStmt); } } /* Conditional is gone - simply fall into the next block */ block->bbJumpKind = BBJ_NONE; /* Update bbRefs and bbNum - Conditional predecessors to the same * block are counted twice so we have to remove one of them */ noway_assert(bNext->countOfInEdges() > 1); fgRemoveRefPred(bNext, block); return true; } return false; } //------------------------------------------------------------- // fgOptimizeBranch: Optimize an unconditional branch that branches to a conditional branch. // // Currently we require that the conditional branch jump back to the block that follows the unconditional // branch. We can improve the code execution and layout by concatenating a copy of the conditional branch // block at the end of the conditional branch and reversing the sense of the branch. // // This is only done when the amount of code to be copied is smaller than our calculated threshold // in maxDupCostSz. // // Arguments: // bJump - block with branch // // Returns: true if changes were made // bool Compiler::fgOptimizeBranch(BasicBlock* bJump) { if (opts.MinOpts()) { return false; } if (bJump->bbJumpKind != BBJ_ALWAYS) { return false; } if (bJump->bbFlags & BBF_KEEP_BBJ_ALWAYS) { return false; } // Don't hoist a conditional branch into the scratch block; we'd prefer it stay // either BBJ_NONE or BBJ_ALWAYS. if (fgBBisScratch(bJump)) { return false; } BasicBlock* bDest = bJump->bbJumpDest; if (bDest->bbJumpKind != BBJ_COND) { return false; } if (bDest->bbJumpDest != bJump->bbNext) { return false; } // 'bJump' must be in the same try region as the condition, since we're going to insert // a duplicated condition in 'bJump', and the condition might include exception throwing code. if (!BasicBlock::sameTryRegion(bJump, bDest)) { return false; } // do not jump into another try region BasicBlock* bDestNext = bDest->bbNext; if (bDestNext->hasTryIndex() && !BasicBlock::sameTryRegion(bJump, bDestNext)) { return false; } // This function is only called by fgReorderBlocks, which we do not run in the backend. // If we wanted to run block reordering in the backend, we would need to be able to // calculate cost information for LIR on a per-node basis in order for this function // to work. assert(!bJump->IsLIR()); assert(!bDest->IsLIR()); unsigned estDupCostSz = 0; for (Statement* const stmt : bDest->Statements()) { // We want to compute the costs of the statement. 
Unfortunately, gtPrepareCost() / gtSetStmtInfo() // call gtSetEvalOrder(), which can reorder nodes. If it does so, we need to re-thread the gtNext/gtPrev // links. We don't know if it does or doesn't reorder nodes, so we end up always re-threading the links. gtSetStmtInfo(stmt); if (fgStmtListThreaded) { fgSetStmtSeq(stmt); } GenTree* expr = stmt->GetRootNode(); estDupCostSz += expr->GetCostSz(); } bool allProfileWeightsAreValid = false; weight_t weightJump = bJump->bbWeight; weight_t weightDest = bDest->bbWeight; weight_t weightNext = bJump->bbNext->bbWeight; bool rareJump = bJump->isRunRarely(); bool rareDest = bDest->isRunRarely(); bool rareNext = bJump->bbNext->isRunRarely(); // If we have profile data then we calculate the number of time // the loop will iterate into loopIterations if (fgIsUsingProfileWeights()) { // Only rely upon the profile weight when all three of these blocks // have either good profile weights or are rarelyRun // if ((bJump->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) && (bDest->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY)) && (bJump->bbNext->bbFlags & (BBF_PROF_WEIGHT | BBF_RUN_RARELY))) { allProfileWeightsAreValid = true; if ((weightJump * 100) < weightDest) { rareJump = true; } if ((weightNext * 100) < weightDest) { rareNext = true; } if (((weightDest * 100) < weightJump) && ((weightDest * 100) < weightNext)) { rareDest = true; } } } unsigned maxDupCostSz = 6; // // Branches between the hot and rarely run regions // should be minimized. So we allow a larger size // if (rareDest != rareJump) { maxDupCostSz += 6; } if (rareDest != rareNext) { maxDupCostSz += 6; } // // We we are ngen-ing: // If the uncondional branch is a rarely run block then // we are willing to have more code expansion since we // won't be running code from this page // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { if (rareJump) { maxDupCostSz *= 2; } } // If the compare has too high cost then we don't want to dup bool costIsTooHigh = (estDupCostSz > maxDupCostSz); #ifdef DEBUG if (verbose) { printf("\nDuplication of the conditional block " FMT_BB " (always branch from " FMT_BB ") %s, because the cost of duplication (%i) is %s than %i, validProfileWeights = %s\n", bDest->bbNum, bJump->bbNum, costIsTooHigh ? "not done" : "performed", estDupCostSz, costIsTooHigh ? "greater" : "less or equal", maxDupCostSz, allProfileWeightsAreValid ? "true" : "false"); } #endif // DEBUG if (costIsTooHigh) { return false; } /* Looks good - duplicate the conditional block */ Statement* newStmtList = nullptr; // new stmt list to be added to bJump Statement* newLastStmt = nullptr; /* Visit all the statements in bDest */ for (Statement* const curStmt : bDest->Statements()) { // Clone/substitute the expression. Statement* stmt = gtCloneStmt(curStmt); // cloneExpr doesn't handle everything. if (stmt == nullptr) { return false; } if (fgStmtListThreaded) { gtSetStmtInfo(stmt); fgSetStmtSeq(stmt); } /* Append the expression to our list */ if (newStmtList != nullptr) { newLastStmt->SetNextStmt(stmt); } else { newStmtList = stmt; } stmt->SetPrevStmt(newLastStmt); newLastStmt = stmt; } // Get to the condition node from the statement tree. GenTree* condTree = newLastStmt->GetRootNode(); noway_assert(condTree->gtOper == GT_JTRUE); // Set condTree to the operand to the GT_JTRUE. condTree = condTree->AsOp()->gtOp1; // This condTree has to be a RelOp comparison. if (condTree->OperIsCompare() == false) { return false; } // Join the two linked lists. 
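// The cloned statements are appended after bJump's existing statements; the first
// statement's prev link is updated to point at the new overall last statement.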
Statement* lastStmt = bJump->lastStmt(); if (lastStmt != nullptr) { Statement* stmt = bJump->firstStmt(); stmt->SetPrevStmt(newLastStmt); lastStmt->SetNextStmt(newStmtList); newStmtList->SetPrevStmt(lastStmt); } else { bJump->bbStmtList = newStmtList; newStmtList->SetPrevStmt(newLastStmt); } // // Reverse the sense of the compare // gtReverseCond(condTree); // We need to update the following flags of the bJump block if they were set in the bDest block bJump->bbFlags |= (bDest->bbFlags & (BBF_HAS_NEWOBJ | BBF_HAS_NEWARRAY | BBF_HAS_NULLCHECK | BBF_HAS_IDX_LEN)); bJump->bbJumpKind = BBJ_COND; bJump->bbJumpDest = bDest->bbNext; /* Update bbRefs and bbPreds */ // bJump now falls through into the next block // fgAddRefPred(bJump->bbNext, bJump); // bJump no longer jumps to bDest // fgRemoveRefPred(bDest, bJump); // bJump now jumps to bDest->bbNext // fgAddRefPred(bDest->bbNext, bJump); if (weightJump > 0) { if (allProfileWeightsAreValid) { if (weightDest > weightJump) { bDest->bbWeight = (weightDest - weightJump); } else if (!bDest->isRunRarely()) { bDest->bbWeight = BB_UNITY_WEIGHT; } } else { weight_t newWeightDest = 0; if (weightDest > weightJump) { newWeightDest = (weightDest - weightJump); } if (weightDest >= (BB_LOOP_WEIGHT_SCALE * BB_UNITY_WEIGHT) / 2) { newWeightDest = (weightDest * 2) / (BB_LOOP_WEIGHT_SCALE * BB_UNITY_WEIGHT); } if (newWeightDest > 0) { bDest->bbWeight = newWeightDest; } } } #if DEBUG if (verbose) { // Dump out the newStmtList that we created printf("\nfgOptimizeBranch added these statements(s) at the end of " FMT_BB ":\n", bJump->bbNum); for (Statement* stmt : StatementList(newStmtList)) { gtDispStmt(stmt); } printf("\nfgOptimizeBranch changed block " FMT_BB " from BBJ_ALWAYS to BBJ_COND.\n", bJump->bbNum); printf("\nAfter this change in fgOptimizeBranch the BB graph is:"); fgDispBasicBlocks(verboseTrees); printf("\n"); } #endif // DEBUG return true; } //----------------------------------------------------------------------------- // fgOptimizeSwitchJump: see if a switch has a dominant case, and modify to // check for that case up front (aka switch peeling). // // Returns: // True if the switch now has an upstream check for the dominant case. // bool Compiler::fgOptimizeSwitchJumps() { if (!fgHasSwitch) { return false; } bool modified = false; for (BasicBlock* const block : Blocks()) { // Lowering expands switches, so calling this method on lowered IR // does not make sense. // assert(!block->IsLIR()); if (block->bbJumpKind != BBJ_SWITCH) { continue; } if (block->isRunRarely()) { continue; } if (!block->bbJumpSwt->bbsHasDominantCase) { continue; } // We currently will only see dominant cases with PGO. // assert(block->hasProfileWeight()); const unsigned dominantCase = block->bbJumpSwt->bbsDominantCase; JITDUMP(FMT_BB " has switch with dominant case %u, considering peeling\n", block->bbNum, dominantCase); // The dominant case should not be the default case, as we already peel that one. // assert(dominantCase < (block->bbJumpSwt->bbsCount - 1)); BasicBlock* const dominantTarget = block->bbJumpSwt->bbsDstTab[dominantCase]; Statement* const switchStmt = block->lastStmt(); GenTree* const switchTree = switchStmt->GetRootNode(); assert(switchTree->OperIs(GT_SWITCH)); GenTree* const switchValue = switchTree->AsOp()->gtGetOp1(); // Split the switch block just before at the switch. // // After this, newBlock is the switch block, and // block is the upstream block. 
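// Conceptually, the peeled result is:
//
//   block:    if (switchValue == dominantCase) goto dominantTarget;   (BBJ_COND)
//   newBlock: switch (switchValue) { ... remaining cases ... }        (original BBJ_SWITCH)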
// BasicBlock* newBlock = nullptr; if (block->firstStmt() == switchStmt) { newBlock = fgSplitBlockAtBeginning(block); } else { newBlock = fgSplitBlockAfterStatement(block, switchStmt->GetPrevStmt()); } // Set up a compare in the upstream block, "stealing" the switch value tree. // GenTree* const dominantCaseCompare = gtNewOperNode(GT_EQ, TYP_INT, switchValue, gtNewIconNode(dominantCase)); GenTree* const jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, dominantCaseCompare); Statement* const jmpStmt = fgNewStmtFromTree(jmpTree, switchStmt->GetDebugInfo()); fgInsertStmtAtEnd(block, jmpStmt); // Reattach switch value to the switch. This may introduce a comma // in the upstream compare tree, if the switch value expression is complex. // switchTree->AsOp()->gtOp1 = fgMakeMultiUse(&dominantCaseCompare->AsOp()->gtOp1); // Update flags // switchTree->gtFlags = switchTree->AsOp()->gtOp1->gtFlags; dominantCaseCompare->gtFlags |= dominantCaseCompare->AsOp()->gtOp1->gtFlags; jmpTree->gtFlags |= dominantCaseCompare->gtFlags; dominantCaseCompare->gtFlags |= GTF_RELOP_JMP_USED | GTF_DONT_CSE; // Wire up the new control flow. // block->bbJumpKind = BBJ_COND; block->bbJumpDest = dominantTarget; flowList* const blockToTargetEdge = fgAddRefPred(dominantTarget, block); flowList* const blockToNewBlockEdge = newBlock->bbPreds; assert(blockToNewBlockEdge->getBlock() == block); assert(blockToTargetEdge->getBlock() == block); // Update profile data // const weight_t fraction = newBlock->bbJumpSwt->bbsDominantFraction; const weight_t blockToTargetWeight = block->bbWeight * fraction; const weight_t blockToNewBlockWeight = block->bbWeight - blockToTargetWeight; newBlock->setBBProfileWeight(blockToNewBlockWeight); blockToTargetEdge->setEdgeWeights(blockToTargetWeight, blockToTargetWeight, dominantTarget); blockToNewBlockEdge->setEdgeWeights(blockToNewBlockWeight, blockToNewBlockWeight, block); // There may be other switch cases that lead to this same block, but there's just // one edge in the flowgraph. So we need to subtract off the profile data that now flows // along the peeled edge. // for (flowList* pred = dominantTarget->bbPreds; pred != nullptr; pred = pred->flNext) { if (pred->getBlock() == newBlock) { if (pred->flDupCount == 1) { // The only switch case leading to the dominant target was the one we peeled. // So the edge from the switch now has zero weight. // pred->setEdgeWeights(BB_ZERO_WEIGHT, BB_ZERO_WEIGHT, dominantTarget); } else { // Other switch cases also lead to the dominant target. // Subtract off the weight we transferred to the peel. // weight_t newMinWeight = pred->edgeWeightMin() - blockToTargetWeight; weight_t newMaxWeight = pred->edgeWeightMax() - blockToTargetWeight; if (newMinWeight < BB_ZERO_WEIGHT) { newMinWeight = BB_ZERO_WEIGHT; } if (newMaxWeight < BB_ZERO_WEIGHT) { newMaxWeight = BB_ZERO_WEIGHT; } pred->setEdgeWeights(newMinWeight, newMaxWeight, dominantTarget); } } } // For now we leave the switch as is, since there's no way // to indicate that one of the cases is now unreachable. // // But it no longer has a dominant case. // newBlock->bbJumpSwt->bbsHasDominantCase = false; modified = true; } return modified; } //----------------------------------------------------------------------------- // fgExpandRunRarelyBlocks: given the current set of run rarely blocks, // see if we can deduce that some other blocks are run rarely. // // Returns: // True if new block was marked as run rarely. 
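// Notes:
//   A block can be inferred to be rarely run when it falls into or jumps only to
//   rarely run blocks, or when every one of its predecessors is rarely run.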
// bool Compiler::fgExpandRarelyRunBlocks() { bool result = false; #ifdef DEBUG if (verbose) { printf("\n*************** In fgExpandRarelyRunBlocks()\n"); } const char* reason = nullptr; #endif // Helper routine to figure out the lexically earliest predecessor // of bPrev that could become run rarely, given that bPrev // has just become run rarely. // // Note this is potentially expensive for large flow graphs and blocks // with lots of predecessors. // auto newRunRarely = [](BasicBlock* block, BasicBlock* bPrev) { // Figure out earliest block that might be impacted BasicBlock* bPrevPrev = nullptr; BasicBlock* tmpbb; if ((bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0) { // If we've got a BBJ_CALLFINALLY/BBJ_ALWAYS pair, treat the BBJ_CALLFINALLY as an // additional predecessor for the BBJ_ALWAYS block tmpbb = bPrev->bbPrev; noway_assert(tmpbb != nullptr); #if defined(FEATURE_EH_FUNCLETS) noway_assert(tmpbb->isBBCallAlwaysPair()); bPrevPrev = tmpbb; #else if (tmpbb->bbJumpKind == BBJ_CALLFINALLY) { bPrevPrev = tmpbb; } #endif } flowList* pred = bPrev->bbPreds; if (pred != nullptr) { // bPrevPrev will be set to the lexically // earliest predecessor of bPrev. while (pred != nullptr) { if (bPrevPrev == nullptr) { // Initially we select the first block in the bbPreds list bPrevPrev = pred->getBlock(); continue; } // Walk the flow graph lexically forward from pred->getBlock() // if we find (block == bPrevPrev) then // pred->getBlock() is an earlier predecessor. for (tmpbb = pred->getBlock(); tmpbb != nullptr; tmpbb = tmpbb->bbNext) { if (tmpbb == bPrevPrev) { /* We found an ealier predecessor */ bPrevPrev = pred->getBlock(); break; } else if (tmpbb == bPrev) { // We have reached bPrev so stop walking // as this cannot be an earlier predecessor break; } } // Onto the next predecessor pred = pred->flNext; } } if (bPrevPrev != nullptr) { // Walk the flow graph forward from bPrevPrev // if we don't find (tmpbb == bPrev) then our candidate // bPrevPrev is lexically after bPrev and we do not // want to select it as our new block for (tmpbb = bPrevPrev; tmpbb != nullptr; tmpbb = tmpbb->bbNext) { if (tmpbb == bPrev) { // Set up block back to the lexically // earliest predecessor of pPrev return bPrevPrev; } } } // No reason to backtrack // return (BasicBlock*)nullptr; }; // We expand the number of rarely run blocks by observing // that a block that falls into or jumps to a rarely run block, // must itself be rarely run and when we have a conditional // jump in which both branches go to rarely run blocks then // the block must itself be rarely run BasicBlock* block; BasicBlock* bPrev; for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext) { if (bPrev->isRunRarely()) { continue; } if (bPrev->hasProfileWeight()) { continue; } const char* reason = nullptr; switch (bPrev->bbJumpKind) { case BBJ_ALWAYS: if (bPrev->bbJumpDest->isRunRarely()) { reason = "Unconditional jump to a rarely run block"; } break; case BBJ_CALLFINALLY: if (bPrev->isBBCallAlwaysPair() && block->isRunRarely()) { reason = "Call of finally followed by a rarely run block"; } break; case BBJ_NONE: if (block->isRunRarely()) { reason = "Falling into a rarely run block"; } break; case BBJ_COND: if (block->isRunRarely() && bPrev->bbJumpDest->isRunRarely()) { reason = "Both sides of a conditional jump are rarely run"; } break; default: break; } if (reason != nullptr) { JITDUMP("%s, marking " FMT_BB " as rarely run\n", reason, bPrev->bbNum); // Must not have previously been marked 
noway_assert(!bPrev->isRunRarely()); // Mark bPrev as a new rarely run block bPrev->bbSetRunRarely(); // We have marked at least one block. // result = true; // See if we should to backtrack. // BasicBlock* bContinue = newRunRarely(block, bPrev); // If so, reset block to the backtrack point. // if (bContinue != nullptr) { block = bContinue; } } } // Now iterate over every block to see if we can prove that a block is rarely run // (i.e. when all predecessors to the block are rarely run) // for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext) { // If block is not run rarely, then check to make sure that it has // at least one non-rarely run block. if (!block->isRunRarely()) { bool rare = true; /* Make sure that block has at least one normal predecessor */ for (BasicBlock* const predBlock : block->PredBlocks()) { /* Find the fall through predecessor, if any */ if (!predBlock->isRunRarely()) { rare = false; break; } } if (rare) { // If 'block' is the start of a handler or filter then we cannot make it // rarely run because we may have an exceptional edge that // branches here. // if (bbIsHandlerBeg(block)) { rare = false; } } if (rare) { block->bbSetRunRarely(); result = true; #ifdef DEBUG if (verbose) { printf("All branches to " FMT_BB " are from rarely run blocks, marking as rarely run\n", block->bbNum); } #endif // DEBUG // When marking a BBJ_CALLFINALLY as rarely run we also mark // the BBJ_ALWAYS that comes after it as rarely run // if (block->isBBCallAlwaysPair()) { BasicBlock* bNext = block->bbNext; PREFIX_ASSUME(bNext != nullptr); bNext->bbSetRunRarely(); #ifdef DEBUG if (verbose) { printf("Also marking the BBJ_ALWAYS at " FMT_BB " as rarely run\n", bNext->bbNum); } #endif // DEBUG } } } /* COMPACT blocks if possible */ if (bPrev->bbJumpKind == BBJ_NONE) { if (fgCanCompactBlocks(bPrev, block)) { fgCompactBlocks(bPrev, block); block = bPrev; continue; } } // // if bPrev->bbWeight is not based upon profile data we can adjust // the weights of bPrev and block // else if (bPrev->isBBCallAlwaysPair() && // we must have a BBJ_CALLFINALLY and BBK_ALWAYS pair (bPrev->bbWeight != block->bbWeight) && // the weights are currently different !bPrev->hasProfileWeight()) // and the BBJ_CALLFINALLY block is not using profiled // weights { if (block->isRunRarely()) { bPrev->bbWeight = block->bbWeight; // the BBJ_CALLFINALLY block now has the same weight as the BBJ_ALWAYS block bPrev->bbFlags |= BBF_RUN_RARELY; // and is now rarely run #ifdef DEBUG if (verbose) { printf("Marking the BBJ_CALLFINALLY block at " FMT_BB " as rarely run because " FMT_BB " is rarely run\n", bPrev->bbNum, block->bbNum); } #endif // DEBUG } else if (bPrev->isRunRarely()) { block->bbWeight = bPrev->bbWeight; // the BBJ_ALWAYS block now has the same weight as the BBJ_CALLFINALLY block block->bbFlags |= BBF_RUN_RARELY; // and is now rarely run #ifdef DEBUG if (verbose) { printf("Marking the BBJ_ALWAYS block at " FMT_BB " as rarely run because " FMT_BB " is rarely run\n", block->bbNum, bPrev->bbNum); } #endif // DEBUG } else // Both blocks are hot, bPrev is known not to be using profiled weight { bPrev->bbWeight = block->bbWeight; // the BBJ_CALLFINALLY block now has the same weight as the BBJ_ALWAYS block } noway_assert(block->bbWeight == bPrev->bbWeight); } } return result; } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif 
//----------------------------------------------------------------------------- // fgReorderBlocks: reorder blocks to favor frequent fall through paths, // move rare blocks to the end of the method/eh region, and move // funclets to the ends of methods. // // Returns: // True if anything got reordered. Reordering blocks may require changing // IR to reverse branch conditions. // bool Compiler::fgReorderBlocks() { noway_assert(opts.compDbgCode == false); #if defined(FEATURE_EH_FUNCLETS) assert(fgFuncletsCreated); #endif // FEATURE_EH_FUNCLETS // We can't relocate anything if we only have one block if (fgFirstBB->bbNext == nullptr) { return false; } bool newRarelyRun = false; bool movedBlocks = false; bool optimizedSwitches = false; bool optimizedBranches = false; // First let us expand the set of run rarely blocks newRarelyRun |= fgExpandRarelyRunBlocks(); #if !defined(FEATURE_EH_FUNCLETS) movedBlocks |= fgRelocateEHRegions(); #endif // !FEATURE_EH_FUNCLETS // // If we are using profile weights we can change some // switch jumps into conditional test and jump // if (fgIsUsingProfileWeights()) { // // Note that this is currently not yet implemented // optimizedSwitches = fgOptimizeSwitchJumps(); if (optimizedSwitches) { fgUpdateFlowGraph(); } } #ifdef DEBUG if (verbose) { printf("*************** In fgReorderBlocks()\n"); printf("\nInitial BasicBlocks"); fgDispBasicBlocks(verboseTrees); printf("\n"); } #endif // DEBUG BasicBlock* bNext; BasicBlock* bPrev; BasicBlock* block; unsigned XTnum; EHblkDsc* HBtab; // Iterate over every block, remembering our previous block in bPrev for (bPrev = fgFirstBB, block = bPrev->bbNext; block != nullptr; bPrev = block, block = block->bbNext) { // // Consider relocating the rarely run blocks such that they are at the end of the method. // We also consider reversing conditional branches so that they become a not taken forwards branch. // // If block is marked with a BBF_KEEP_BBJ_ALWAYS flag then we don't move the block if ((block->bbFlags & BBF_KEEP_BBJ_ALWAYS) != 0) { continue; } // Finally and handlers blocks are to be kept contiguous. // TODO-CQ: Allow reordering within the handler region if (block->hasHndIndex() == true) { continue; } bool reorderBlock = true; // This is set to false if we decide not to reorder 'block' bool isRare = block->isRunRarely(); BasicBlock* bDest = nullptr; bool forwardBranch = false; bool backwardBranch = false; // Setup bDest if (bPrev->KindIs(BBJ_COND, BBJ_ALWAYS)) { bDest = bPrev->bbJumpDest; forwardBranch = fgIsForwardBranch(bPrev); backwardBranch = !forwardBranch; } // We will look for bPrev as a non rarely run block followed by block as a rarely run block // if (bPrev->isRunRarely()) { reorderBlock = false; } // If the weights of the bPrev, block and bDest were all obtained from a profile run // then we can use them to decide if it is useful to reverse this conditional branch weight_t profHotWeight = -1; if (bPrev->hasProfileWeight() && block->hasProfileWeight() && ((bDest == nullptr) || bDest->hasProfileWeight())) { // // All blocks have profile information // if (forwardBranch) { if (bPrev->bbJumpKind == BBJ_ALWAYS) { // We can pull up the blocks that the unconditional jump branches to // if the weight of bDest is greater or equal to the weight of block // also the weight of bDest can't be zero. 
// if ((bDest->bbWeight < block->bbWeight) || (bDest->bbWeight == BB_ZERO_WEIGHT)) { reorderBlock = false; } else { // // If this remains true then we will try to pull up bDest to succeed bPrev // bool moveDestUp = true; if (fgHaveValidEdgeWeights) { // // The edge bPrev -> bDest must have a higher minimum weight // than every other edge into bDest // flowList* edgeFromPrev = fgGetPredForBlock(bDest, bPrev); noway_assert(edgeFromPrev != nullptr); // Examine all of the other edges into bDest for (flowList* const edge : bDest->PredEdges()) { if (edge != edgeFromPrev) { if (edge->edgeWeightMax() >= edgeFromPrev->edgeWeightMin()) { moveDestUp = false; break; } } } } else { // // The block bPrev must have a higher weight // than every other block that goes into bDest // // Examine all of the other edges into bDest for (BasicBlock* const predBlock : bDest->PredBlocks()) { if ((predBlock != bPrev) && (predBlock->bbWeight >= bPrev->bbWeight)) { moveDestUp = false; break; } } } // Are we still good to move bDest up to bPrev? if (moveDestUp) { // // We will consider all blocks that have less weight than profHotWeight to be // uncommonly run blocks as compared with the hot path of bPrev taken-jump to bDest // profHotWeight = bDest->bbWeight - 1; } else { if (block->isRunRarely()) { // We will move any rarely run blocks blocks profHotWeight = 0; } else { // We will move all blocks that have a weight less or equal to our fall through block profHotWeight = block->bbWeight + 1; } // But we won't try to connect with bDest bDest = nullptr; } } } else // (bPrev->bbJumpKind == BBJ_COND) { noway_assert(bPrev->bbJumpKind == BBJ_COND); // // We will reverse branch if the taken-jump to bDest ratio (i.e. 'takenRatio') // is more than 51% // // We will setup profHotWeight to be maximum bbWeight that a block // could have for us not to want to reverse the conditional branch // // We will consider all blocks that have less weight than profHotWeight to be // uncommonly run blocks as compared with the hot path of bPrev taken-jump to bDest // if (fgHaveValidEdgeWeights) { // We have valid edge weights, however even with valid edge weights // we may have a minimum and maximum range for each edges value // // We will check that the min weight of the bPrev to bDest edge // is more than twice the max weight of the bPrev to block edge. // // bPrev --> [BB04, weight 31] // | \. // edgeToBlock -------------> O \. // [min=8,max=10] V \. // block --> [BB05, weight 10] \. // \. 
// edgeToDest ----------------------------> O // [min=21,max=23] | // V // bDest ---------------> [BB08, weight 21] // flowList* edgeToDest = fgGetPredForBlock(bDest, bPrev); flowList* edgeToBlock = fgGetPredForBlock(block, bPrev); noway_assert(edgeToDest != nullptr); noway_assert(edgeToBlock != nullptr); // // Calculate the taken ratio // A takenRation of 0.10 means taken 10% of the time, not taken 90% of the time // A takenRation of 0.50 means taken 50% of the time, not taken 50% of the time // A takenRation of 0.90 means taken 90% of the time, not taken 10% of the time // double takenCount = ((double)edgeToDest->edgeWeightMin() + (double)edgeToDest->edgeWeightMax()) / 2.0; double notTakenCount = ((double)edgeToBlock->edgeWeightMin() + (double)edgeToBlock->edgeWeightMax()) / 2.0; double totalCount = takenCount + notTakenCount; double takenRatio = takenCount / totalCount; // If the takenRatio is greater or equal to 51% then we will reverse the branch if (takenRatio < 0.51) { reorderBlock = false; } else { // set profHotWeight profHotWeight = (edgeToBlock->edgeWeightMin() + edgeToBlock->edgeWeightMax()) / 2 - 1; } } else { // We don't have valid edge weight so we will be more conservative // We could have bPrev, block or bDest as part of a loop and thus have extra weight // // We will do two checks: // 1. Check that the weight of bDest is at least two times more than block // 2. Check that the weight of bPrev is at least three times more than block // // bPrev --> [BB04, weight 31] // | \. // V \. // block --> [BB05, weight 10] \. // \. // | // V // bDest ---------------> [BB08, weight 21] // // For this case weightDest is calculated as (21+1)/2 or 11 // and weightPrev is calculated as (31+2)/3 also 11 // // Generally both weightDest and weightPrev should calculate // the same value unless bPrev or bDest are part of a loop // weight_t weightDest = bDest->isMaxBBWeight() ? bDest->bbWeight : (bDest->bbWeight + 1) / 2; weight_t weightPrev = bPrev->isMaxBBWeight() ? bPrev->bbWeight : (bPrev->bbWeight + 2) / 3; // select the lower of weightDest and weightPrev profHotWeight = (weightDest < weightPrev) ? weightDest : weightPrev; // if the weight of block is greater (or equal) to profHotWeight then we don't reverse the cond if (block->bbWeight >= profHotWeight) { reorderBlock = false; } } } } else // not a forwardBranch { if (bPrev->bbFallsThrough()) { goto CHECK_FOR_RARE; } // Here we should pull up the highest weight block remaining // and place it here since bPrev does not fall through. 
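// Walk the blocks that follow bPrev, tracking the highest-weight block that EH
// constraints allow us to relocate so that it comes right after bPrev.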
weight_t highestWeight = 0; BasicBlock* candidateBlock = nullptr; BasicBlock* lastNonFallThroughBlock = bPrev; BasicBlock* bTmp = bPrev->bbNext; while (bTmp != nullptr) { // Don't try to split a Call/Always pair // if (bTmp->isBBCallAlwaysPair()) { // Move bTmp forward bTmp = bTmp->bbNext; } // // Check for loop exit condition // if (bTmp == nullptr) { break; } // // if its weight is the highest one we've seen and // the EH regions allow for us to place bTmp after bPrev // if ((bTmp->bbWeight > highestWeight) && fgEhAllowsMoveBlock(bPrev, bTmp)) { // When we have a current candidateBlock that is a conditional (or unconditional) jump // to bTmp (which is a higher weighted block) then it is better to keep out current // candidateBlock and have it fall into bTmp // if ((candidateBlock == nullptr) || !candidateBlock->KindIs(BBJ_COND, BBJ_ALWAYS) || (candidateBlock->bbJumpDest != bTmp)) { // otherwise we have a new candidateBlock // highestWeight = bTmp->bbWeight; candidateBlock = lastNonFallThroughBlock->bbNext; } } if ((bTmp->bbFallsThrough() == false) || (bTmp->bbWeight == BB_ZERO_WEIGHT)) { lastNonFallThroughBlock = bTmp; } bTmp = bTmp->bbNext; } // If we didn't find a suitable block then skip this if (highestWeight == 0) { reorderBlock = false; } else { noway_assert(candidateBlock != nullptr); // If the candidateBlock is the same a block then skip this if (candidateBlock == block) { reorderBlock = false; } else { // Set bDest to the block that we want to come after bPrev bDest = candidateBlock; // set profHotWeight profHotWeight = highestWeight - 1; } } } } else // we don't have good profile info (or we are falling through) { CHECK_FOR_RARE:; /* We only want to reorder when we have a rarely run */ /* block right after a normal block, */ /* (bPrev is known to be a normal block at this point) */ if (!isRare) { if ((bDest == block->bbNext) && (block->bbJumpKind == BBJ_RETURN) && (bPrev->bbJumpKind == BBJ_ALWAYS)) { // This is a common case with expressions like "return Expr1 && Expr2" -- move the return // to establish fall-through. } else { reorderBlock = false; } } else { /* If the jump target bDest is also a rarely run block then we don't want to do the reversal */ if (bDest && bDest->isRunRarely()) { reorderBlock = false; /* Both block and bDest are rarely run */ } else { // We will move any rarely run blocks blocks profHotWeight = 0; } } } if (reorderBlock == false) { // // Check for an unconditional branch to a conditional branch // which also branches back to our next block // const bool optimizedBranch = fgOptimizeBranch(bPrev); if (optimizedBranch) { noway_assert(bPrev->bbJumpKind == BBJ_COND); optimizedBranches = true; } continue; } // Now we need to determine which blocks should be moved // // We consider one of two choices: // // 1. Moving the fall-through blocks (or rarely run blocks) down to // later in the method and hopefully connecting the jump dest block // so that it becomes the fall through block // // And when bDest in not NULL, we also consider: // // 2. 
Moving the bDest block (or blocks) up to bPrev // so that it could be used as a fall through block // // We will prefer option #1 if we are able to connect the jump dest // block as the fall though block otherwise will we try to use option #2 // // // Consider option #1: relocating blocks starting at 'block' // to later in flowgraph // // We set bStart to the first block that will be relocated // and bEnd to the last block that will be relocated BasicBlock* bStart = block; BasicBlock* bEnd = bStart; bNext = bEnd->bbNext; bool connected_bDest = false; if ((backwardBranch && !isRare) || ((block->bbFlags & BBF_DONT_REMOVE) != 0)) // Don't choose option #1 when block is the start of a try region { bStart = nullptr; bEnd = nullptr; } else { while (true) { // Don't try to split a Call/Always pair // if (bEnd->isBBCallAlwaysPair()) { // Move bEnd and bNext forward bEnd = bNext; bNext = bNext->bbNext; } // // Check for loop exit condition // if (bNext == nullptr) { break; } #if defined(FEATURE_EH_FUNCLETS) // Check if we've reached the funclets region, at the end of the function if (fgFirstFuncletBB == bEnd->bbNext) { break; } #endif // FEATURE_EH_FUNCLETS if (bNext == bDest) { connected_bDest = true; break; } // All the blocks must have the same try index // and must not have the BBF_DONT_REMOVE flag set if (!BasicBlock::sameTryRegion(bStart, bNext) || ((bNext->bbFlags & BBF_DONT_REMOVE) != 0)) { // exit the loop, bEnd is now set to the // last block that we want to relocate break; } // If we are relocating rarely run blocks.. if (isRare) { // ... then all blocks must be rarely run if (!bNext->isRunRarely()) { // exit the loop, bEnd is now set to the // last block that we want to relocate break; } } else { // If we are moving blocks that are hot then all // of the blocks moved must be less than profHotWeight */ if (bNext->bbWeight >= profHotWeight) { // exit the loop, bEnd is now set to the // last block that we would relocate break; } } // Move bEnd and bNext forward bEnd = bNext; bNext = bNext->bbNext; } // Set connected_bDest to true if moving blocks [bStart .. bEnd] // connects with the the jump dest of bPrev (i.e bDest) and // thus allows bPrev fall through instead of jump. 
if (bNext == bDest) { connected_bDest = true; } } // Now consider option #2: Moving the jump dest block (or blocks) // up to bPrev // // The variables bStart2, bEnd2 and bPrev2 are used for option #2 // // We will setup bStart2 to the first block that will be relocated // and bEnd2 to the last block that will be relocated // and bPrev2 to be the lexical pred of bDest // // If after this calculation bStart2 is NULL we cannot use option #2, // otherwise bStart2, bEnd2 and bPrev2 are all non-NULL and we will use option #2 BasicBlock* bStart2 = nullptr; BasicBlock* bEnd2 = nullptr; BasicBlock* bPrev2 = nullptr; // If option #1 didn't connect bDest and bDest isn't NULL if ((connected_bDest == false) && (bDest != nullptr) && // The jump target cannot be moved if it has the BBF_DONT_REMOVE flag set ((bDest->bbFlags & BBF_DONT_REMOVE) == 0)) { // We will consider option #2: relocating blocks starting at 'bDest' to succeed bPrev // // setup bPrev2 to be the lexical pred of bDest bPrev2 = block; while (bPrev2 != nullptr) { if (bPrev2->bbNext == bDest) { break; } bPrev2 = bPrev2->bbNext; } if ((bPrev2 != nullptr) && fgEhAllowsMoveBlock(bPrev, bDest)) { // We have decided that relocating bDest to be after bPrev is best // Set bStart2 to the first block that will be relocated // and bEnd2 to the last block that will be relocated // // Assigning to bStart2 selects option #2 // bStart2 = bDest; bEnd2 = bStart2; bNext = bEnd2->bbNext; while (true) { // Don't try to split a Call/Always pair // if (bEnd2->isBBCallAlwaysPair()) { noway_assert(bNext->bbJumpKind == BBJ_ALWAYS); // Move bEnd2 and bNext forward bEnd2 = bNext; bNext = bNext->bbNext; } // Check for the Loop exit conditions if (bNext == nullptr) { break; } if (bEnd2->bbFallsThrough() == false) { break; } // If we are relocating rarely run blocks.. // All the blocks must have the same try index, // and must not have the BBF_DONT_REMOVE flag set if (!BasicBlock::sameTryRegion(bStart2, bNext) || ((bNext->bbFlags & BBF_DONT_REMOVE) != 0)) { // exit the loop, bEnd2 is now set to the // last block that we want to relocate break; } if (isRare) { /* ... then all blocks must not be rarely run */ if (bNext->isRunRarely()) { // exit the loop, bEnd2 is now set to the // last block that we want to relocate break; } } else { // If we are relocating hot blocks // all blocks moved must be greater than profHotWeight if (bNext->bbWeight <= profHotWeight) { // exit the loop, bEnd2 is now set to the // last block that we want to relocate break; } } // Move bEnd2 and bNext forward bEnd2 = bNext; bNext = bNext->bbNext; } } } // If we are using option #1 then ... if (bStart2 == nullptr) { // Don't use option #1 for a backwards branch if (bStart == nullptr) { continue; } // .... 
Don't move a set of blocks that are already at the end of the main method if (bEnd == fgLastBBInMainFunction()) { continue; } } #ifdef DEBUG if (verbose) { if (bDest != nullptr) { if (bPrev->bbJumpKind == BBJ_COND) { printf("Decided to reverse conditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); } else if (bPrev->bbJumpKind == BBJ_ALWAYS) { printf("Decided to straighten unconditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); } else { printf("Decided to place hot code after " FMT_BB ", placed " FMT_BB " after this block ", bPrev->bbNum, bDest->bbNum); } if (profHotWeight > 0) { printf("because of IBC profile data\n"); } else { if (bPrev->bbFallsThrough()) { printf("since it falls into a rarely run block\n"); } else { printf("since it is succeeded by a rarely run block\n"); } } } else { printf("Decided to relocate block(s) after block " FMT_BB " since they are %s block(s)\n", bPrev->bbNum, block->isRunRarely() ? "rarely run" : "uncommonly run"); } } #endif // DEBUG // We will set insertAfterBlk to the block the precedes our insertion range // We will set bStartPrev to be the block that precedes the set of blocks that we are moving BasicBlock* insertAfterBlk; BasicBlock* bStartPrev; if (bStart2 != nullptr) { // Option #2: relocating blocks starting at 'bDest' to follow bPrev // Update bStart and bEnd so that we can use these two for all later operations bStart = bStart2; bEnd = bEnd2; // Set bStartPrev to be the block that comes before bStart bStartPrev = bPrev2; // We will move [bStart..bEnd] to immediately after bPrev insertAfterBlk = bPrev; } else { // option #1: Moving the fall-through blocks (or rarely run blocks) down to later in the method // Set bStartPrev to be the block that come before bStart bStartPrev = bPrev; // We will move [bStart..bEnd] but we will pick the insert location later insertAfterBlk = nullptr; } // We are going to move [bStart..bEnd] so they can't be NULL noway_assert(bStart != nullptr); noway_assert(bEnd != nullptr); // bEnd can't be a BBJ_CALLFINALLY unless it is a RETLESS call noway_assert((bEnd->bbJumpKind != BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); // bStartPrev must be set to the block that precedes bStart noway_assert(bStartPrev->bbNext == bStart); // Since we will be unlinking [bStart..bEnd], // we need to compute and remember if bStart is in each of // the try and handler regions // bool* fStartIsInTry = nullptr; bool* fStartIsInHnd = nullptr; if (compHndBBtabCount > 0) { fStartIsInTry = new (this, CMK_Unknown) bool[compHndBBtabCount]; fStartIsInHnd = new (this, CMK_Unknown) bool[compHndBBtabCount]; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { fStartIsInTry[XTnum] = HBtab->InTryRegionBBRange(bStart); fStartIsInHnd[XTnum] = HBtab->InHndRegionBBRange(bStart); } } /* Temporarily unlink [bStart..bEnd] from the flow graph */ fgUnlinkRange(bStart, bEnd); if (insertAfterBlk == nullptr) { // Find new location for the unlinked block(s) // Set insertAfterBlk to the block which will precede the insertion point if (!bStart->hasTryIndex() && isRare) { // We'll just insert the blocks at the end of the method. If the method // has funclets, we will insert at the end of the main method but before // any of the funclets. Note that we create funclets before we call // fgReorderBlocks(). 
insertAfterBlk = fgLastBBInMainFunction(); noway_assert(insertAfterBlk != bPrev); } else { BasicBlock* startBlk; BasicBlock* lastBlk; EHblkDsc* ehDsc = ehInitTryBlockRange(bStart, &startBlk, &lastBlk); BasicBlock* endBlk; /* Setup startBlk and endBlk as the range to search */ if (ehDsc != nullptr) { endBlk = lastBlk->bbNext; /* Multiple (nested) try regions might start from the same BB. For example, try3 try2 try1 |--- |--- |--- BB01 | | | BB02 | | |--- BB03 | | BB04 | |------------ BB05 | BB06 |------------------- BB07 Now if we want to insert in try2 region, we will start with startBlk=BB01. The following loop will allow us to start from startBlk==BB04. */ while (!BasicBlock::sameTryRegion(startBlk, bStart) && (startBlk != endBlk)) { startBlk = startBlk->bbNext; } // startBlk cannot equal endBlk as it must come before endBlk if (startBlk == endBlk) { goto CANNOT_MOVE; } // we also can't start searching the try region at bStart if (startBlk == bStart) { // if bEnd is the last block in the method or // or if bEnd->bbNext is in a different try region // then we cannot move the blocks // if ((bEnd->bbNext == nullptr) || !BasicBlock::sameTryRegion(startBlk, bEnd->bbNext)) { goto CANNOT_MOVE; } startBlk = bEnd->bbNext; // Check that the new startBlk still comes before endBlk // startBlk cannot equal endBlk as it must come before endBlk if (startBlk == endBlk) { goto CANNOT_MOVE; } BasicBlock* tmpBlk = startBlk; while ((tmpBlk != endBlk) && (tmpBlk != nullptr)) { tmpBlk = tmpBlk->bbNext; } // when tmpBlk is NULL that means startBlk is after endBlk // so there is no way to move bStart..bEnd within the try region if (tmpBlk == nullptr) { goto CANNOT_MOVE; } } } else { noway_assert(isRare == false); /* We'll search through the entire main method */ startBlk = fgFirstBB; endBlk = fgEndBBAfterMainFunction(); } // Calculate nearBlk and jumpBlk and then call fgFindInsertPoint() // to find our insertion block // { // If the set of blocks that we are moving ends with a BBJ_ALWAYS to // another [rarely run] block that comes after bPrev (forward branch) // then we can set up nearBlk to eliminate this jump sometimes // BasicBlock* nearBlk = nullptr; BasicBlock* jumpBlk = nullptr; if ((bEnd->bbJumpKind == BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && fgIsForwardBranch(bEnd, bPrev)) { // Set nearBlk to be the block in [startBlk..endBlk] // such that nearBlk->bbNext == bEnd->JumpDest // if no such block exists then set nearBlk to NULL nearBlk = startBlk; jumpBlk = bEnd; do { // We do not want to set nearBlk to bPrev // since then we will not move [bStart..bEnd] // if (nearBlk != bPrev) { // Check if nearBlk satisfies our requirement if (nearBlk->bbNext == bEnd->bbJumpDest) { break; } } // Did we reach the endBlk? if (nearBlk == endBlk) { nearBlk = nullptr; break; } // advance nearBlk to the next block nearBlk = nearBlk->bbNext; } while (nearBlk != nullptr); } // if nearBlk is NULL then we set nearBlk to be the // first block that we want to insert after. if (nearBlk == nullptr) { if (bDest != nullptr) { // we want to insert after bDest nearBlk = bDest; } else { // we want to insert after bPrev nearBlk = bPrev; } } /* Set insertAfterBlk to the block which we will insert after. */ insertAfterBlk = fgFindInsertPoint(bStart->bbTryIndex, true, // Insert in the try region. 
startBlk, endBlk, nearBlk, jumpBlk, bStart->bbWeight == BB_ZERO_WEIGHT); } /* See if insertAfterBlk is the same as where we started, */ /* or if we could not find any insertion point */ if ((insertAfterBlk == bPrev) || (insertAfterBlk == nullptr)) { CANNOT_MOVE:; /* We couldn't move the blocks, so put everything back */ /* relink [bStart .. bEnd] into the flow graph */ bPrev->setNext(bStart); if (bEnd->bbNext) { bEnd->bbNext->bbPrev = bEnd; } #ifdef DEBUG if (verbose) { if (bStart != bEnd) { printf("Could not relocate blocks (" FMT_BB " .. " FMT_BB ")\n", bStart->bbNum, bEnd->bbNum); } else { printf("Could not relocate block " FMT_BB "\n", bStart->bbNum); } } #endif // DEBUG continue; } } } noway_assert(insertAfterBlk != nullptr); noway_assert(bStartPrev != nullptr); noway_assert(bStartPrev != insertAfterBlk); #ifdef DEBUG movedBlocks = true; if (verbose) { const char* msg; if (bStart2 != nullptr) { msg = "hot"; } else { if (isRare) { msg = "rarely run"; } else { msg = "uncommon"; } } printf("Relocated %s ", msg); if (bStart != bEnd) { printf("blocks (" FMT_BB " .. " FMT_BB ")", bStart->bbNum, bEnd->bbNum); } else { printf("block " FMT_BB, bStart->bbNum); } if (bPrev->bbJumpKind == BBJ_COND) { printf(" by reversing conditional jump at " FMT_BB "\n", bPrev->bbNum); } else { printf("\n", bPrev->bbNum); } } #endif // DEBUG if (bPrev->bbJumpKind == BBJ_COND) { /* Reverse the bPrev jump condition */ Statement* condTestStmt = bPrev->lastStmt(); GenTree* condTest = condTestStmt->GetRootNode(); noway_assert(condTest->gtOper == GT_JTRUE); condTest->AsOp()->gtOp1 = gtReverseCond(condTest->AsOp()->gtOp1); if (bStart2 == nullptr) { /* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */ bPrev->bbJumpDest = bStart; } else { noway_assert(insertAfterBlk == bPrev); noway_assert(insertAfterBlk->bbNext == block); /* Set the new jump dest for bPrev to the rarely run or uncommon block(s) */ bPrev->bbJumpDest = block; } } // If we are moving blocks that are at the end of a try or handler // we will need to shorten ebdTryLast or ebdHndLast // ehUpdateLastBlocks(bEnd, bStartPrev); // If we are moving blocks into the end of a try region or handler region // we will need to extend ebdTryLast or ebdHndLast so the blocks that we // are moving are part of this try or handler region. // for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Are we moving blocks to the end of a try region? if (HBtab->ebdTryLast == insertAfterBlk) { if (fStartIsInTry[XTnum]) { // bStart..bEnd is in the try, so extend the try region fgSetTryEnd(HBtab, bEnd); } } // Are we moving blocks to the end of a handler region? 
if (HBtab->ebdHndLast == insertAfterBlk) { if (fStartIsInHnd[XTnum]) { // bStart..bEnd is in the handler, so extend the handler region fgSetHndEnd(HBtab, bEnd); } } } /* We have decided to insert the block(s) after 'insertAfterBlk' */ fgMoveBlocksAfter(bStart, bEnd, insertAfterBlk); if (bDest) { /* We may need to insert an unconditional branch after bPrev to bDest */ fgConnectFallThrough(bPrev, bDest); } else { /* If bPrev falls through, we must insert a jump to block */ fgConnectFallThrough(bPrev, block); } BasicBlock* bSkip = bEnd->bbNext; /* If bEnd falls through, we must insert a jump to bNext */ fgConnectFallThrough(bEnd, bNext); if (bStart2 == nullptr) { /* If insertAfterBlk falls through, we are forced to */ /* add a jump around the block(s) we just inserted */ fgConnectFallThrough(insertAfterBlk, bSkip); } else { /* We may need to insert an unconditional branch after bPrev2 to bStart */ fgConnectFallThrough(bPrev2, bStart); } #if DEBUG if (verbose) { printf("\nAfter this change in fgReorderBlocks the BB graph is:"); fgDispBasicBlocks(verboseTrees); printf("\n"); } fgVerifyHandlerTab(); // Make sure that the predecessor lists are accurate if (expensiveDebugCheckLevel >= 2) { fgDebugCheckBBlist(); } #endif // DEBUG // Set our iteration point 'block' to be the new bPrev->bbNext // It will be used as the next bPrev block = bPrev->bbNext; } // end of for loop(bPrev,block) const bool changed = movedBlocks || newRarelyRun || optimizedSwitches || optimizedBranches; if (changed) { fgNeedsUpdateFlowGraph = true; #if DEBUG // Make sure that the predecessor lists are accurate if (expensiveDebugCheckLevel >= 2) { fgDebugCheckBBlist(); } #endif // DEBUG } return changed; } #ifdef _PREFAST_ #pragma warning(pop) #endif //------------------------------------------------------------- // fgUpdateFlowGraph: Removes any empty blocks, unreachable blocks, and redundant jumps. // Most of those appear after dead store removal and folding of conditionals. // Also, compact consecutive basic blocks. // // Arguments: // doTailDuplication - true to attempt tail duplication optimization // // Returns: true if the flowgraph has been modified // // Notes: // Debuggable code and Min Optimization JIT also introduces basic blocks // but we do not optimize those! // bool Compiler::fgUpdateFlowGraph(bool doTailDuplication) { #ifdef DEBUG if (verbose) { printf("\n*************** In fgUpdateFlowGraph()"); } #endif // DEBUG /* This should never be called for debuggable code */ noway_assert(opts.OptimizationEnabled()); #ifdef DEBUG if (verbose) { printf("\nBefore updating the flow graph:\n"); fgDispBasicBlocks(verboseTrees); printf("\n"); } #endif // DEBUG /* Walk all the basic blocks - look for unconditional jumps, empty blocks, blocks to compact, etc... * * OBSERVATION: * Once a block is removed the predecessors are not accurate (assuming they were at the beginning) * For now we will only use the information in bbRefs because it is easier to be updated */ bool modified = false; bool change; do { change = false; BasicBlock* block; // the current block BasicBlock* bPrev = nullptr; // the previous non-worthless block BasicBlock* bNext; // the successor of the current block BasicBlock* bDest; // the jump target of the current block for (block = fgFirstBB; block != nullptr; block = block->bbNext) { /* Some blocks may be already marked removed by other optimizations * (e.g worthless loop removal), without being explicitly removed * from the list. 
*/ if (block->bbFlags & BBF_REMOVED) { if (bPrev) { bPrev->setNext(block->bbNext); } else { /* WEIRD first basic block is removed - should have an assert here */ noway_assert(!"First basic block marked as BBF_REMOVED???"); fgFirstBB = block->bbNext; } continue; } /* We jump to the REPEAT label if we performed a change involving the current block * This is in case there are other optimizations that can show up * (e.g. - compact 3 blocks in a row) * If nothing happens, we then finish the iteration and move to the next block */ REPEAT:; bNext = block->bbNext; bDest = nullptr; if (block->bbJumpKind == BBJ_ALWAYS) { bDest = block->bbJumpDest; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, bDest)) { change = true; modified = true; bDest = block->bbJumpDest; bNext = block->bbNext; } } if (block->bbJumpKind == BBJ_NONE) { bDest = nullptr; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->bbNext)) { change = true; modified = true; bDest = block->bbJumpDest; bNext = block->bbNext; } } // Remove JUMPS to the following block // and optimize any JUMPS to JUMPS if (block->KindIs(BBJ_COND, BBJ_ALWAYS)) { bDest = block->bbJumpDest; if (bDest == bNext) { if (fgOptimizeBranchToNext(block, bNext, bPrev)) { change = true; modified = true; bDest = nullptr; } } } if (bDest != nullptr) { // Do we have a JUMP to an empty unconditional JUMP block? if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { if (fgOptimizeBranchToEmptyUnconditional(block, bDest)) { change = true; modified = true; goto REPEAT; } } // Check for cases where reversing the branch condition may enable // other flow opts. // // Current block falls through to an empty bNext BBJ_ALWAYS, and // (a) block jump target is bNext's bbNext. // (b) block jump target is elsewhere but join free, and // bNext's jump target has a join. // if ((block->bbJumpKind == BBJ_COND) && // block is a BBJ_COND block (bNext != nullptr) && // block is not the last block (bNext->bbRefs == 1) && // No other block jumps to bNext (bNext->bbJumpKind == BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block bNext->isEmpty() && // and it is an an empty block (bNext != bNext->bbJumpDest) && // special case for self jumps (bDest != fgFirstColdBlock)) { // case (a) // const bool isJumpAroundEmpty = (bNext->bbNext == bDest); // case (b) // // Note the asymetric checks for refs == 1 and refs > 1 ensures that we // differentiate the roles played by bDest and bNextJumpDest. We need some // sense of which arrangement is preferable to avoid getting stuck in a loop // reversing and re-reversing. // // Other tiebreaking criteria could be considered. // // Pragmatic constraints: // // * don't consider lexical predecessors, or we may confuse loop recognition // * don't consider blocks of different rarities // BasicBlock* const bNextJumpDest = bNext->bbJumpDest; const bool isJumpToJoinFree = !isJumpAroundEmpty && (bDest->bbRefs == 1) && (bNextJumpDest->bbRefs > 1) && (bDest->bbNum > block->bbNum) && (block->isRunRarely() == bDest->isRunRarely()); bool optimizeJump = isJumpAroundEmpty || isJumpToJoinFree; // We do not optimize jumps between two different try regions. 
// However jumping to a block that is not in any try region is OK // if (bDest->hasTryIndex() && !BasicBlock::sameTryRegion(block, bDest)) { optimizeJump = false; } // Also consider bNext's try region // if (bNext->hasTryIndex() && !BasicBlock::sameTryRegion(block, bNext)) { optimizeJump = false; } // If we are optimizing using real profile weights // then don't optimize a conditional jump to an unconditional jump // until after we have computed the edge weights // if (fgIsUsingProfileWeights()) { // if block and bdest are in different hot/cold regions we can't do this this optimization // because we can't allow fall-through into the cold region. if (!fgEdgeWeightsComputed || fgInDifferentRegions(block, bDest)) { fgNeedsUpdateFlowGraph = true; optimizeJump = false; } } if (optimizeJump && isJumpToJoinFree) { // In the join free case, we also need to move bDest right after bNext // to create same flow as in the isJumpAroundEmpty case. // if (!fgEhAllowsMoveBlock(bNext, bDest) || bDest->isBBCallAlwaysPair()) { optimizeJump = false; } else { // We don't expect bDest to already be right after bNext. // assert(bDest != bNext->bbNext); JITDUMP("\nMoving " FMT_BB " after " FMT_BB " to enable reversal\n", bDest->bbNum, bNext->bbNum); // If bDest can fall through we'll need to create a jump // block after it too. Remember where to jump to. // BasicBlock* const bDestNext = bDest->bbNext; // Move bDest // if (ehIsBlockEHLast(bDest)) { ehUpdateLastBlocks(bDest, bDest->bbPrev); } fgUnlinkBlock(bDest); fgInsertBBafter(bNext, bDest); if (ehIsBlockEHLast(bNext)) { ehUpdateLastBlocks(bNext, bDest); } // Add fall through fixup block, if needed. // if (bDest->KindIs(BBJ_NONE, BBJ_COND)) { BasicBlock* const bFixup = fgNewBBafter(BBJ_ALWAYS, bDest, true); bFixup->inheritWeight(bDestNext); bFixup->bbJumpDest = bDestNext; fgRemoveRefPred(bDestNext, bDest); fgAddRefPred(bFixup, bDest); fgAddRefPred(bDestNext, bFixup); } } } if (optimizeJump) { JITDUMP("\nReversing a conditional jump around an unconditional jump (" FMT_BB " -> " FMT_BB ", " FMT_BB " -> " FMT_BB ")\n", block->bbNum, bDest->bbNum, bNext->bbNum, bNextJumpDest->bbNum); // Reverse the jump condition // GenTree* test = block->lastNode(); noway_assert(test->OperIsConditionalJump()); if (test->OperGet() == GT_JTRUE) { GenTree* cond = gtReverseCond(test->AsOp()->gtOp1); assert(cond == test->AsOp()->gtOp1); // Ensure `gtReverseCond` did not create a new node. test->AsOp()->gtOp1 = cond; } else { gtReverseCond(test); } // Optimize the Conditional JUMP to go to the new target block->bbJumpDest = bNext->bbJumpDest; fgAddRefPred(bNext->bbJumpDest, block, fgRemoveRefPred(bNext->bbJumpDest, bNext)); /* Unlink bNext from the BasicBlock list; note that we can do this even though other blocks could jump to it - the reason is that elsewhere in this function we always redirect jumps to jumps to jump to the final label, so even if another block jumps to bNext it won't matter once we're done since any such jump will be redirected to the final target by the time we're done here. */ fgRemoveRefPred(bNext, block); fgUnlinkBlock(bNext); /* Mark the block as removed */ bNext->bbFlags |= BBF_REMOVED; // Update the loop table if we removed the bottom of a loop, for example. 
fgUpdateLoopsAfterCompacting(block, bNext); // If this block was aligned, unmark it bNext->unmarkLoopAlign(this DEBUG_ARG("Optimized jump")); // If this is the first Cold basic block update fgFirstColdBlock if (bNext == fgFirstColdBlock) { fgFirstColdBlock = bNext->bbNext; } // // If we removed the end of a try region or handler region // we will need to update ebdTryLast or ebdHndLast. // for (EHblkDsc* const HBtab : EHClauses(this)) { if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext)) { fgSkipRmvdBlocks(HBtab); } } // we optimized this JUMP - goto REPEAT to catch similar cases change = true; modified = true; #ifdef DEBUG if (verbose) { printf("\nAfter reversing the jump:\n"); fgDispBasicBlocks(verboseTrees); } #endif // DEBUG /* For a rare special case we cannot jump to REPEAT as jumping to REPEAT will cause us to delete 'block' because it currently appears to be unreachable. As it is a self loop that only has a single bbRef (itself) However since the unlinked bNext has additional bbRefs (that we will later connect to 'block'), it is not really unreachable. */ if ((bNext->bbRefs > 0) && (bNext->bbJumpDest == block) && (block->bbRefs == 1)) { continue; } goto REPEAT; } } } // // Update the switch jump table such that it follows jumps to jumps: // if (block->bbJumpKind == BBJ_SWITCH) { if (fgOptimizeSwitchBranches(block)) { change = true; modified = true; goto REPEAT; } } noway_assert(!(block->bbFlags & BBF_REMOVED)); /* COMPACT blocks if possible */ if (fgCanCompactBlocks(block, bNext)) { fgCompactBlocks(block, bNext); /* we compacted two blocks - goto REPEAT to catch similar cases */ change = true; modified = true; goto REPEAT; } /* Remove unreachable or empty blocks - do not consider blocks marked BBF_DONT_REMOVE or genReturnBB block * These include first and last block of a TRY, exception handlers and RANGE_CHECK_FAIL THROW blocks */ if ((block->bbFlags & BBF_DONT_REMOVE) == BBF_DONT_REMOVE || block == genReturnBB) { bPrev = block; continue; } #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't remove the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. 
if (block->countOfInEdges() == 0 && bPrev->bbJumpKind == BBJ_CALLFINALLY) { assert(bPrev->isBBCallAlwaysPair()); noway_assert(!(bPrev->bbFlags & BBF_RETLESS_CALL)); noway_assert(block->bbJumpKind == BBJ_ALWAYS); bPrev = block; continue; } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) noway_assert(!block->bbCatchTyp); noway_assert(!(block->bbFlags & BBF_TRY_BEG)); /* Remove unreachable blocks * * We'll look for blocks that have countOfInEdges() = 0 (blocks may become * unreachable due to a BBJ_ALWAYS introduced by conditional folding for example) */ if (block->countOfInEdges() == 0) { /* no references -> unreachable - remove it */ /* For now do not update the bbNum, do it at the end */ fgRemoveBlock(block, /* unreachable */ true); change = true; modified = true; /* we removed the current block - the rest of the optimizations won't have a target * continue with the next one */ continue; } else if (block->countOfInEdges() == 1) { switch (block->bbJumpKind) { case BBJ_COND: case BBJ_ALWAYS: if (block->bbJumpDest == block) { fgRemoveBlock(block, /* unreachable */ true); change = true; modified = true; /* we removed the current block - the rest of the optimizations * won't have a target so continue with the next block */ continue; } break; default: break; } } noway_assert(!(block->bbFlags & BBF_REMOVED)); /* Remove EMPTY blocks */ if (block->isEmpty()) { assert(bPrev == block->bbPrev); if (fgOptimizeEmptyBlock(block)) { change = true; modified = true; } /* Have we removed the block? */ if (block->bbFlags & BBF_REMOVED) { /* block was removed - no change to bPrev */ continue; } } /* Set the predecessor of the last reachable block * If we removed the current block, the predecessor remains unchanged * otherwise, since the current block is ok, it becomes the predecessor */ noway_assert(!(block->bbFlags & BBF_REMOVED)); bPrev = block; } } while (change); fgNeedsUpdateFlowGraph = false; #ifdef DEBUG if (verbose && modified) { printf("\nAfter updating the flow graph:\n"); fgDispBasicBlocks(verboseTrees); fgDispHandlerTab(); } if (compRationalIRForm) { for (BasicBlock* const block : Blocks()) { LIR::AsRange(block).CheckLIR(this); } } fgVerifyHandlerTab(); // Make sure that the predecessor lists are accurate fgDebugCheckBBlist(); fgDebugCheckUpdate(); #endif // DEBUG return modified; } #ifdef _PREFAST_ #pragma warning(pop) #endif //------------------------------------------------------------- // fgGetCodeEstimate: Compute a code size estimate for the block, including all statements // and block control flow. 
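// The estimate is a rough byte count: a fixed cost per bbJumpKind plus the sum of
// the statements' GetCostSz() values.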
// // Arguments: // block - block to consider // // Returns: // Code size estimate for block // unsigned Compiler::fgGetCodeEstimate(BasicBlock* block) { unsigned costSz = 0; // estimate of block's code size cost switch (block->bbJumpKind) { case BBJ_NONE: costSz = 0; break; case BBJ_ALWAYS: case BBJ_EHCATCHRET: case BBJ_LEAVE: case BBJ_COND: costSz = 2; break; case BBJ_CALLFINALLY: costSz = 5; break; case BBJ_SWITCH: costSz = 10; break; case BBJ_THROW: costSz = 1; // We place a int3 after the code for a throw block break; case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: costSz = 1; break; case BBJ_RETURN: // return from method costSz = 3; break; default: noway_assert(!"Bad bbJumpKind"); break; } for (Statement* const stmt : block->NonPhiStatements()) { unsigned char cost = stmt->GetCostSz(); costSz += cost; } return costSz; } #ifdef FEATURE_JIT_METHOD_PERF //------------------------------------------------------------------------ // fgMeasureIR: count and return the number of IR nodes in the function. // unsigned Compiler::fgMeasureIR() { unsigned nodeCount = 0; for (BasicBlock* const block : Blocks()) { if (!block->IsLIR()) { for (Statement* const stmt : block->Statements()) { fgWalkTreePre(stmt->GetRootNodePointer(), [](GenTree** slot, fgWalkData* data) -> Compiler::fgWalkResult { (*reinterpret_cast<unsigned*>(data->pCallbackData))++; return Compiler::WALK_CONTINUE; }, &nodeCount); } } else { for (GenTree* node : LIR::AsRange(block)) { nodeCount++; } } } return nodeCount; } #endif // FEATURE_JIT_METHOD_PERF //------------------------------------------------------------------------ // fgCompDominatedByExceptionalEntryBlocks: compute blocks that are // dominated by not normal entry. // void Compiler::fgCompDominatedByExceptionalEntryBlocks() { assert(fgEnterBlksSetValid); if (BlockSetOps::Count(this, fgEnterBlks) != 1) // There are exception entries. { for (unsigned i = 1; i <= fgBBNumMax; ++i) { BasicBlock* block = fgBBInvPostOrder[i]; if (BlockSetOps::IsMember(this, fgEnterBlks, block->bbNum)) { if (fgFirstBB != block) // skip the normal entry. { block->SetDominatedByExceptionalEntryFlag(); } } else if (block->bbIDom->IsDominatedByExceptionalEntryFlag()) { block->SetDominatedByExceptionalEntryFlag(); } } } }
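As an aside, the fixed-cost table in fgGetCodeEstimate above can be illustrated with a minimal standalone sketch. This is not JIT code: BranchKind, EstimateBlockSize, and the vector of statement costs are simplified stand-ins introduced only for illustration; the per-branch-kind byte costs are the ones used by the function above.

// Minimal sketch of the cost model: a fixed byte cost chosen by the block's branch
// kind, plus the accumulated size cost of each statement in the block.
#include <vector>

enum class BranchKind { None, Always, Cond, CallFinally, Switch, Throw, Return };

static unsigned EstimateBlockSize(BranchKind kind, const std::vector<unsigned>& stmtCosts)
{
    unsigned costSz = 0;
    switch (kind)
    {
        case BranchKind::None:        costSz = 0;  break;
        case BranchKind::Always:
        case BranchKind::Cond:        costSz = 2;  break;
        case BranchKind::CallFinally: costSz = 5;  break;
        case BranchKind::Switch:      costSz = 10; break;
        case BranchKind::Throw:       costSz = 1;  break; // int3 placed after a throw block
        case BranchKind::Return:      costSz = 3;  break;
    }
    for (unsigned c : stmtCosts)
    {
        costSz += c; // per-statement size estimate, as with stmt->GetCostSz() above
    }
    return costSz;
}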
1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
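A minimal sketch of the placement policy described above, under stated assumptions: the names PatchpointStrategy, ChoosePatchpointStrategy, and backedgeThreshold are hypothetical and do not appear in the PR, which wires the decision into the JIT's patchpoint placement and its config knobs rather than a free function like this.

// Hypothetical illustration of the adaptive policy: with few backedges, place an OSR
// patchpoint at each backedge source; otherwise (or when a source is not eligible)
// fall back to placing patchpoints at backedge targets.
enum class PatchpointStrategy
{
    BackedgeSources,
    BackedgeTargets
};

static PatchpointStrategy ChoosePatchpointStrategy(unsigned backedgeCount, unsigned backedgeThreshold)
{
    return (backedgeCount <= backedgeThreshold) ? PatchpointStrategy::BackedgeSources
                                                : PatchpointStrategy::BackedgeTargets;
}

The C# `for` loop shape mentioned in the description typically contributes a single backedge, so the adaptive default would resolve to source placement in that common case.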
./src/coreclr/jit/importer.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "corexcep.h" #define Verify(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ } \ } while (0) #define VerifyOrReturn(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return; \ } \ } while (0) #define VerifyOrReturnSpeculative(cond, msg, speculative) \ do \ { \ if (speculative) \ { \ if (!(cond)) \ { \ return false; \ } \ } \ else \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return false; \ } \ } \ } while (0) /*****************************************************************************/ void Compiler::impInit() { impStmtList = impLastStmt = nullptr; #ifdef DEBUG impInlinedCodeSize = 0; #endif // DEBUG } /***************************************************************************** * * Pushes the given tree on the stack. */ void Compiler::impPushOnStack(GenTree* tree, typeInfo ti) { /* Check for overflow. If inlining, we may be using a bigger stack */ if ((verCurrentState.esStackDepth >= info.compMaxStack) && (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0))) { BADCODE("stack overflow"); } #ifdef DEBUG // If we are pushing a struct, make certain we know the precise type! 
if (tree->TypeGet() == TYP_STRUCT) { assert(ti.IsType(TI_STRUCT)); CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle(); assert(clsHnd != NO_CLASS_HANDLE); } #endif // DEBUG verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti; verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree; if ((tree->gtType == TYP_LONG) && (compLongUsed == false)) { compLongUsed = true; } else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false)) { compFloatingPointUsed = true; } } inline void Compiler::impPushNullObjRefOnStack() { impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL)); } // This method gets called when we run into unverifiable code // (and we are verifying the method) inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { #ifdef DEBUG const char* tail = strrchr(file, '\\'); if (tail) { file = tail + 1; } if (JitConfig.JitBreakOnUnsafeCode()) { assert(!"Unsafe code detected"); } #endif JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); if (compIsForImportOnly()) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line)); } } inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); #ifdef DEBUG // BreakIfDebuggerPresent(); if (getBreakOnBadCode()) { assert(!"Typechecking error"); } #endif RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr); UNREACHABLE(); } // helper function that will tell us if the IL instruction at the addr passed // by param consumes an address at the top of the stack. We use it to save // us lvAddrTaken bool Compiler::impILConsumesAddr(const BYTE* codeAddr) { assert(!compIsForInlining()); OPCODE opcode; opcode = (OPCODE)getU1LittleEndian(codeAddr); switch (opcode) { // case CEE_LDFLDA: We're taking this one out as if you have a sequence // like // // ldloca.0 // ldflda whatever // // of a primitivelike struct, you end up after morphing with addr of a local // that's not marked as addrtaken, which is wrong. Also ldflda is usually used // for structs that contain other structs, which isnt a case we handle very // well now for other reasons. case CEE_LDFLD: { // We won't collapse small fields. This is probably not the right place to have this // check, but we're only using the function for this purpose, and is easy to factor // out if we need to do so. 
CORINFO_RESOLVED_TOKEN resolvedToken; impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field); var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField)); // Preserve 'small' int types if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } if (varTypeIsSmall(lclTyp)) { return false; } return true; } default: break; } return false; } void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind) { pResolvedToken->tokenContext = impTokenLookupContextHandle; pResolvedToken->tokenScope = info.compScopeHnd; pResolvedToken->token = getU4LittleEndian(addr); pResolvedToken->tokenType = kind; info.compCompHnd->resolveToken(pResolvedToken); } /***************************************************************************** * * Pop one tree from the stack. */ StackEntry Compiler::impPopStack() { if (verCurrentState.esStackDepth == 0) { BADCODE("stack underflow"); } return verCurrentState.esStack[--verCurrentState.esStackDepth]; } /***************************************************************************** * * Peep at n'th (0-based) tree on the top of the stack. */ StackEntry& Compiler::impStackTop(unsigned n) { if (verCurrentState.esStackDepth <= n) { BADCODE("stack underflow"); } return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1]; } unsigned Compiler::impStackHeight() { return verCurrentState.esStackDepth; } /***************************************************************************** * Some of the trees are spilled specially. While unspilling them, or * making a copy, these need to be handled specially. The function * enumerates the operators possible after spilling. */ #ifdef DEBUG // only used in asserts static bool impValidSpilledStackEntry(GenTree* tree) { if (tree->gtOper == GT_LCL_VAR) { return true; } if (tree->OperIsConst()) { return true; } return false; } #endif /***************************************************************************** * * The following logic is used to save/restore stack contents. * If 'copy' is true, then we make a copy of the trees on the stack. These * have to all be cloneable/spilled values. 
*/ void Compiler::impSaveStackState(SavedStack* savePtr, bool copy) { savePtr->ssDepth = verCurrentState.esStackDepth; if (verCurrentState.esStackDepth) { savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth]; size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees); if (copy) { StackEntry* table = savePtr->ssTrees; /* Make a fresh copy of all the stack entries */ for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++) { table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo; GenTree* tree = verCurrentState.esStack[level].val; assert(impValidSpilledStackEntry(tree)); switch (tree->gtOper) { case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_LCL_VAR: table->val = gtCloneExpr(tree); break; default: assert(!"Bad oper - Not covered by impValidSpilledStackEntry()"); break; } } } else { memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize); } } } void Compiler::impRestoreStackState(SavedStack* savePtr) { verCurrentState.esStackDepth = savePtr->ssDepth; if (verCurrentState.esStackDepth) { memcpy(verCurrentState.esStack, savePtr->ssTrees, verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack)); } } //------------------------------------------------------------------------ // impBeginTreeList: Get the tree list started for a new basic block. // inline void Compiler::impBeginTreeList() { assert(impStmtList == nullptr && impLastStmt == nullptr); } /***************************************************************************** * * Store the given start and end stmt in the given basic block. This is * mostly called by impEndTreeList(BasicBlock *block). It is called * directly only for handling CEE_LEAVEs out of finally-protected try's. */ inline void Compiler::impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt) { /* Make the list circular, so that we can easily walk it backwards */ firstStmt->SetPrevStmt(lastStmt); /* Store the tree list in the basic block */ block->bbStmtList = firstStmt; /* The block should not already be marked as imported */ assert((block->bbFlags & BBF_IMPORTED) == 0); block->bbFlags |= BBF_IMPORTED; } inline void Compiler::impEndTreeList(BasicBlock* block) { if (impStmtList == nullptr) { // The block should not already be marked as imported. assert((block->bbFlags & BBF_IMPORTED) == 0); // Empty block. Just mark it as imported. block->bbFlags |= BBF_IMPORTED; } else { impEndTreeList(block, impStmtList, impLastStmt); } #ifdef DEBUG if (impLastILoffsStmt != nullptr) { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } #endif impStmtList = impLastStmt = nullptr; } /***************************************************************************** * * Check that storing the given tree doesnt mess up the semantic order. Note * that this has only limited value as we can only check [0..chkLevel). 
*/ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) { #ifndef DEBUG return; #else if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE) { return; } GenTree* tree = stmt->GetRootNode(); // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack if (tree->gtFlags & GTF_CALL) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0); } } if (tree->gtOper == GT_ASG) { // For an assignment to a local variable, all references of that // variable have to be spilled. If it is aliased, all calls and // indirect accesses have to be spilled if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); for (unsigned level = 0; level < chkLevel; level++) { assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum)); assert(!lvaTable[lclNum].IsAddressExposed() || (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0); } } // If the access may be to global memory, all side effects have to be spilled. else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0); } } } #endif } //------------------------------------------------------------------------ // impAppendStmt: Append the given statement to the current block's tree list. // // // Arguments: // stmt - The statement to add. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo) { if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE)) { assert(chkLevel <= verCurrentState.esStackDepth); /* If the statement being appended has any side-effects, check the stack to see if anything needs to be spilled to preserve correct ordering. */ GenTree* expr = stmt->GetRootNode(); GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT; // Assignment to (unaliased) locals don't count as a side-effect as // we handle them specially using impSpillLclRefs(). Temp locals should // be fine too. 
if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && ((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2)) { GenTreeFlags op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT; assert(flags == (op2Flags | GTF_ASG)); flags = op2Flags; } if (flags != 0) { bool spillGlobEffects = false; if ((flags & GTF_CALL) != 0) { // If there is a call, we have to spill global refs spillGlobEffects = true; } else if (!expr->OperIs(GT_ASG)) { if ((flags & GTF_ASG) != 0) { // The expression is not an assignment node but it has an assignment side effect, it // must be an atomic op, HW intrinsic or some other kind of node that stores to memory. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } } else { GenTree* lhs = expr->gtGetOp1(); GenTree* rhs = expr->gtGetOp2(); if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0) { // Either side of the assignment node has an assignment side effect. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } else if ((lhs->gtFlags & GTF_GLOB_REF) != 0) { spillGlobEffects = true; } } impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt")); } else { impSpillSpecialSideEff(); } } impAppendStmtCheck(stmt, chkLevel); impAppendStmt(stmt); #ifdef FEATURE_SIMD impMarkContiguousSIMDFieldAssignments(stmt); #endif // Once we set the current offset as debug info in an appended tree, we are // ready to report the following offsets. Note that we need to compare // offsets here instead of debug info, since we do not set the "is call" // bit in impCurStmtDI. if (checkConsumedDebugInfo && (impLastStmt->GetDebugInfo().GetLocation().GetOffset() == impCurStmtDI.GetLocation().GetOffset())) { impCurStmtOffsSet(BAD_IL_OFFSET); } #ifdef DEBUG if (impLastILoffsStmt == nullptr) { impLastILoffsStmt = stmt; } if (verbose) { printf("\n\n"); gtDispStmt(stmt); } #endif } //------------------------------------------------------------------------ // impAppendStmt: Add the statement to the current stmts list. // // Arguments: // stmt - the statement to add. // inline void Compiler::impAppendStmt(Statement* stmt) { if (impStmtList == nullptr) { // The stmt is the first in the list. impStmtList = stmt; } else { // Append the expression statement to the existing list. impLastStmt->SetNextStmt(stmt); stmt->SetPrevStmt(impLastStmt); } impLastStmt = stmt; } //------------------------------------------------------------------------ // impExtractLastStmt: Extract the last statement from the current stmts list. // // Return Value: // The extracted statement. // // Notes: // It assumes that the stmt will be reinserted later. // Statement* Compiler::impExtractLastStmt() { assert(impLastStmt != nullptr); Statement* stmt = impLastStmt; impLastStmt = impLastStmt->GetPrevStmt(); if (impLastStmt == nullptr) { impStmtList = nullptr; } return stmt; } //------------------------------------------------------------------------- // impInsertStmtBefore: Insert the given "stmt" before "stmtBefore". // // Arguments: // stmt - a statement to insert; // stmtBefore - an insertion point to insert "stmt" before. 
// inline void Compiler::impInsertStmtBefore(Statement* stmt, Statement* stmtBefore) { assert(stmt != nullptr); assert(stmtBefore != nullptr); if (stmtBefore == impStmtList) { impStmtList = stmt; } else { Statement* stmtPrev = stmtBefore->GetPrevStmt(); stmt->SetPrevStmt(stmtPrev); stmtPrev->SetNextStmt(stmt); } stmt->SetNextStmt(stmtBefore); stmtBefore->SetPrevStmt(stmt); } //------------------------------------------------------------------------ // impAppendTree: Append the given expression tree to the current block's tree list. // // // Arguments: // tree - The tree that will be the root of the newly created statement. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // di - Debug information to associate with the statement. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // // Return value: // The newly created statement. // Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo) { assert(tree); /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impAppendStmt(stmt, chkLevel, checkConsumedDebugInfo); return stmt; } /***************************************************************************** * * Insert the given expression tree before "stmtBefore" */ void Compiler::impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore) { /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impInsertStmtBefore(stmt, stmtBefore); } /***************************************************************************** * * Append an assignment of the given value to a temp to the current tree list. * curLevel is the stack level for which the spill to the temp is being done. */ void Compiler::impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg = gtNewTempAssign(tmp, val); if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * same as above, but handle the valueclass case too */ void Compiler::impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structType, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg; assert(val->TypeGet() != TYP_STRUCT || structType != NO_CLASS_HANDLE); if (varTypeIsStruct(val) && (structType != NO_CLASS_HANDLE)) { assert(tmpNum < lvaCount); assert(structType != NO_CLASS_HANDLE); // if the method is non-verifiable the assert is not true // so at least ignore it in the case when verification is turned on // since any block that tries to use the temp would have failed verification. 
var_types varType = lvaTable[tmpNum].lvType; assert(varType == TYP_UNDEF || varTypeIsStruct(varType)); lvaSetStruct(tmpNum, structType, false); varType = lvaTable[tmpNum].lvType; // Now, set the type of the struct value. Note that lvaSetStruct may modify the type // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType) // that has been passed in for the value being assigned to the temp, in which case we // need to set 'val' to that same type. // Note also that if we always normalized the types of any node that might be a struct // type, this would not be necessary - but that requires additional JIT/EE interface // calls that may not actually be required - e.g. if we only access a field of a struct. GenTree* dst = gtNewLclvNode(tmpNum, varType); asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, di, block); } else { asg = gtNewTempAssign(tmpNum, val); } if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * * Pop the given number of values from the stack and return a list node with * their values. * The 'prefixTree' argument may optionally contain an argument * list that is prepended to the list returned from this function. * * The notion of prepended is a bit misleading in that the list is backwards * from the way I would expect: The first element popped is at the end of * the returned list, and prefixTree is 'before' that, meaning closer to * the end of the list. To get to prefixTree, you have to walk to the * end of the list. * * For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as * such we reverse its meaning such that returnValue has a reversed * prefixTree at the head of the list. */ GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs) { assert(sig == nullptr || count == sig->numArgs); CORINFO_CLASS_HANDLE structType; GenTreeCall::Use* argList; if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { argList = nullptr; } else { // ARG_ORDER_L2R argList = prefixArgs; } while (count--) { StackEntry se = impPopStack(); typeInfo ti = se.seTypeInfo; GenTree* temp = se.val; if (varTypeIsStruct(temp)) { // Morph trees that aren't already OBJs or MKREFANY to be OBJs assert(ti.IsType(TI_STRUCT)); structType = ti.GetClassHandleForValueClass(); bool forceNormalization = false; if (varTypeIsSIMD(temp)) { // We need to ensure that fgMorphArgs will use the correct struct handle to ensure proper // ABI handling of this argument. // Note that this can happen, for example, if we have a SIMD intrinsic that returns a SIMD type // with a different baseType than we've seen. // We also need to ensure an OBJ node if we have a FIELD node that might be transformed to LCL_FLD // or a plain GT_IND. // TODO-Cleanup: Consider whether we can eliminate all of these cases. 
if ((gtGetStructHandleIfPresent(temp) != structType) || temp->OperIs(GT_FIELD)) { forceNormalization = true; } } #ifdef DEBUG if (verbose) { printf("Calling impNormStructVal on:\n"); gtDispTree(temp); } #endif temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL, forceNormalization); #ifdef DEBUG if (verbose) { printf("resulting tree:\n"); gtDispTree(temp); } #endif } /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */ argList = gtPrependNewCallArg(temp, argList); } if (sig != nullptr) { if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass); } CORINFO_ARG_LIST_HANDLE sigArgs = sig->args; GenTreeCall::Use* arg; for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--) { PREFIX_ASSUME(arg != nullptr); CORINFO_CLASS_HANDLE classHnd; CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd)); var_types jitSigType = JITtype2varType(corType); if (!impCheckImplicitArgumentCoercion(jitSigType, arg->GetNode()->TypeGet())) { BADCODE("the call argument has a type that can't be implicitly converted to the signature type"); } // insert implied casts (from float to double or double to float) if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT)) { arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE)); } else if ((jitSigType == TYP_FLOAT) && (arg->GetNode()->TypeGet() == TYP_DOUBLE)) { arg->SetNode(gtNewCastNode(TYP_FLOAT, arg->GetNode(), false, TYP_FLOAT)); } // insert any widening or narrowing casts for backwards compatibility arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType)); if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR && corType != CORINFO_TYPE_VAR) { CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs); if (argRealClass != nullptr) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggered from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass); } } const var_types nodeArgType = arg->GetNode()->TypeGet(); if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType)) { assert(!varTypeIsStruct(nodeArgType)); // Some ABI require precise size information for call arguments less than target pointer size, // for example arm64 OSX. Create a special node to keep this information until morph // consumes it into `fgArgInfo`. 
GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode()); arg->SetNode(putArgType); } sigArgs = info.compCompHnd->getArgNext(sigArgs); } } if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { // Prepend the prefixTree // Simple in-place reversal to place treeList // at the end of a reversed prefixTree while (prefixArgs != nullptr) { GenTreeCall::Use* next = prefixArgs->GetNext(); prefixArgs->SetNext(argList); argList = prefixArgs; prefixArgs = next; } } return argList; } static bool TypeIs(var_types type1, var_types type2) { return type1 == type2; } // Check if type1 matches any type from the list. template <typename... T> static bool TypeIs(var_types type1, var_types type2, T... rest) { return TypeIs(type1, type2) || TypeIs(type1, rest...); } //------------------------------------------------------------------------ // impCheckImplicitArgumentCoercion: check that the node's type is compatible with // the signature's type using ECMA implicit argument coercion table. // // Arguments: // sigType - the type in the call signature; // nodeType - the node type. // // Return Value: // true if they are compatible, false otherwise. // // Notes: // - it is currently allowing byref->long passing, should be fixed in VM; // - it can't check long -> native int case on 64-bit platforms, // so the behavior is different depending on the target bitness. // bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const { if (sigType == nodeType) { return true; } if (TypeIs(sigType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT)) { if (TypeIs(nodeType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT, TYP_I_IMPL)) { return true; } } else if (TypeIs(sigType, TYP_ULONG, TYP_LONG)) { if (TypeIs(nodeType, TYP_LONG)) { return true; } } else if (TypeIs(sigType, TYP_FLOAT, TYP_DOUBLE)) { if (TypeIs(nodeType, TYP_FLOAT, TYP_DOUBLE)) { return true; } } else if (TypeIs(sigType, TYP_BYREF)) { if (TypeIs(nodeType, TYP_I_IMPL)) { return true; } // This condition tolerates such IL: // ; V00 this ref this class-hnd // ldarg.0 // call(byref) if (TypeIs(nodeType, TYP_REF)) { return true; } } else if (varTypeIsStruct(sigType)) { if (varTypeIsStruct(nodeType)) { return true; } } // This condition should not be under `else` because `TYP_I_IMPL` // intersects with `TYP_LONG` or `TYP_INT`. if (TypeIs(sigType, TYP_I_IMPL, TYP_U_IMPL)) { // Note that it allows `ldc.i8 1; call(nint)` on 64-bit platforms, // but we can't distinguish `nint` from `long` there. if (TypeIs(nodeType, TYP_I_IMPL, TYP_U_IMPL, TYP_INT, TYP_UINT)) { return true; } // It tolerates IL that ECMA does not allow but that is commonly used. // Example: // V02 loc1 struct <RTL_OSVERSIONINFOEX, 32> // ldloca.s 0x2 // call(native int) if (TypeIs(nodeType, TYP_BYREF)) { return true; } } return false; } /***************************************************************************** * * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.) * The first "skipReverseCount" items are not reversed. 
*/ GenTreeCall::Use* Compiler::impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount) { assert(skipReverseCount <= count); GenTreeCall::Use* list = impPopCallArgs(count, sig); // reverse the list if (list == nullptr || skipReverseCount == count) { return list; } GenTreeCall::Use* ptr = nullptr; // Initialized to the first node that needs to be reversed GenTreeCall::Use* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed if (skipReverseCount == 0) { ptr = list; } else { lastSkipNode = list; // Get to the first node that needs to be reversed for (unsigned i = 0; i < skipReverseCount - 1; i++) { lastSkipNode = lastSkipNode->GetNext(); } PREFIX_ASSUME(lastSkipNode != nullptr); ptr = lastSkipNode->GetNext(); } GenTreeCall::Use* reversedList = nullptr; do { GenTreeCall::Use* tmp = ptr->GetNext(); ptr->SetNext(reversedList); reversedList = ptr; ptr = tmp; } while (ptr != nullptr); if (skipReverseCount) { lastSkipNode->SetNext(reversedList); return list; } else { return reversedList; } } //------------------------------------------------------------------------ // impAssignStruct: Create a struct assignment // // Arguments: // dest - the destination of the assignment // src - the value to be assigned // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // ilOffset - il offset for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = nullptr */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = nullptr */ ) { assert(varTypeIsStruct(dest)); DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } while (dest->gtOper == GT_COMMA) { // Second thing is the struct. assert(varTypeIsStruct(dest->AsOp()->gtOp2)); // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree. if (pAfterStmt) { Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(dest->AsOp()->gtOp1, curLevel, usedDI); // do the side effect } // set dest to the second thing dest = dest->AsOp()->gtOp2; } assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD || dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX); // Return a NOP if this is a self-assignment. if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR && src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum()) { return gtNewNothingNode(); } // TODO-1stClassStructs: Avoid creating an address if it is not needed, // or re-creating a Blk node if it is. GenTree* destAddr; if (dest->gtOper == GT_IND || dest->OperIsBlk()) { destAddr = dest->AsOp()->gtOp1; } else { destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest); } return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, usedDI, block)); } //------------------------------------------------------------------------ // impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'. 
// // Arguments: // destAddr - address of the destination of the assignment // src - source of the assignment // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // di - debug info for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* dest = nullptr; GenTreeFlags destFlags = GTF_EMPTY; DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } #ifdef DEBUG #ifdef FEATURE_HW_INTRINSICS if (src->OperIs(GT_HWINTRINSIC)) { const GenTreeHWIntrinsic* intrinsic = src->AsHWIntrinsic(); if (HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())) { assert(src->TypeGet() == TYP_STRUCT); } else { assert(varTypeIsSIMD(src)); } } else #endif // FEATURE_HW_INTRINSICS { assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR, GT_COMMA) || ((src->TypeGet() != TYP_STRUCT) && src->OperIsSIMD())); } #endif // DEBUG var_types asgType = src->TypeGet(); if (src->gtOper == GT_CALL) { GenTreeCall* srcCall = src->AsCall(); if (srcCall->TreatAsHasRetBufArg(this)) { // Case of call returning a struct via hidden retbuf arg CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_ARM) // Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter if ((TargetOS::IsWindows || compUnixX86Abi()) && srcCall->IsUnmanaged()) { if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv())) { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the second-to-last node // so it will be pushed on to the stack after the user args but before the native this arg // as required by the native ABI. GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else if (srcCall->GetUnmanagedCallConv() == CorInfoCallConvExtension::Thiscall) { // For thiscall, the "this" parameter is not included in the argument list reversal, // so we need to put the return buffer as the last parameter. for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } else if (lastArg->GetNext() == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, lastArg); } else { assert(lastArg != nullptr && lastArg->GetNext() != nullptr); GenTreeCall::Use* secondLastArg = lastArg; lastArg = lastArg->GetNext(); for (; lastArg->GetNext() != nullptr; secondLastArg = lastArg, lastArg = lastArg->GetNext()) ; assert(secondLastArg->GetNext() != nullptr); gtInsertNewCallArgAfter(destAddr, secondLastArg); } #else GenTreeCall::Use* thisArg = gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs); #endif } else { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the last node so it will be pushed on to the stack last // as required by the native ABI. 
GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else { for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } #else // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); #endif } } else #endif // !defined(TARGET_ARM) { // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } // now returns void, not a struct src->gtType = TYP_VOID; // return the morphed call node return src; } else { // Case of call returning a struct in one or more registers. var_types returnType = (var_types)srcCall->gtReturnType; // First we try to change this to "LclVar/LclFld = call" // if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)) { // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD. // That is, the IR will be of the form lclVar = call for multi-reg return // GenTreeLclVar* lcl = destAddr->AsOp()->gtOp1->AsLclVar(); unsigned lclNum = lcl->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (src->AsCall()->HasMultiRegRetVal()) { // Mark the struct LclVar as used in a MultiReg return context // which currently makes it non promotable. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; } dest = lcl; #if defined(TARGET_ARM) // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case, // but that method has not been updadted to include ARM. impMarkLclDstNotPromotable(lclNum, src, structHnd); lcl->gtFlags |= GTF_DONT_CSE; #elif defined(UNIX_AMD64_ABI) // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs. assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs."); // Make the struct non promotable. The eightbytes could contain multiple fields. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. // TODO-Cleanup: Why is this needed here? This seems that it will set this even for // non-multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; #endif } else // we don't have a GT_ADDR of a GT_LCL_VAR { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier. asgType = returnType; destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->gtOper == GT_RET_EXPR) { GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall(); noway_assert(call->gtOper == GT_CALL); if (call->HasRetBufArg()) { // insert the return value buffer into the argument list as first byref parameter call->gtCallArgs = gtPrependNewCallArg(destAddr, call->gtCallArgs); // now returns void, not a struct src->gtType = TYP_VOID; call->gtType = TYP_VOID; // We already have appended the write to 'dest' GT_CALL's args // So now we just return an empty node (pruning the GT_RET_EXPR) return src; } else { // Case of inline method returning a struct in one or more registers. // We won't need a return buffer asgType = src->gtType; if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR)) { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier. 
destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->OperIsBlk()) { asgType = impNormStructType(structHnd); if (src->gtOper == GT_OBJ) { assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd); } } else if (src->gtOper == GT_INDEX) { asgType = impNormStructType(structHnd); assert(src->AsIndex()->gtStructElemClass == structHnd); } else if (src->gtOper == GT_MKREFANY) { // Since we are assigning the result of a GT_MKREFANY, // "destAddr" must point to a refany. GenTree* destAddrClone; destAddr = impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment")); assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0); assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF); fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr); GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()); GenTree* typeSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset)); // append the assign of the pointer value GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1); if (pAfterStmt) { Statement* newStmt = gtNewStmt(asg, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(asg, curLevel, usedDI); } // return the assign of the type value, to be appended return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2); } else if (src->gtOper == GT_COMMA) { // The second thing is the struct or its address. assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF); if (pAfterStmt) { // Insert op1 after '*pAfterStmt' Statement* newStmt = gtNewStmt(src->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else if (impLastStmt != nullptr) { // Do the side-effect as a separate statement. impAppendTree(src->AsOp()->gtOp1, curLevel, usedDI); } else { // In this case we have neither been given a statement to insert after, nor are we // in the importer where we can append the side effect. // Instead, we're going to sink the assignment below the COMMA. src->AsOp()->gtOp2 = impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); return src; } // Evaluate the second thing using recursion. return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); } else if (src->IsLocal()) { asgType = src->TypeGet(); } else if (asgType == TYP_STRUCT) { // It should already have the appropriate type. assert(asgType == impNormStructType(structHnd)); } if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR)) { GenTree* destNode = destAddr->gtGetOp1(); // If the actual destination is a local, a GT_INDEX or a block node, or is a node that // will be morphed, don't insert an OBJ(ADDR) if it already has the right type. if (destNode->OperIs(GT_LCL_VAR, GT_INDEX) || destNode->OperIsBlk()) { var_types destType = destNode->TypeGet(); // If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible // iff their handles are the same. // Otherwise, they are compatible if their types are the same. bool typesAreCompatible = ((destType == TYP_STRUCT) || (asgType == TYP_STRUCT)) ? 
((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType)) : (destType == asgType); if (typesAreCompatible) { dest = destNode; if (destType != TYP_STRUCT) { // Use a normalized type if available. We know from above that they're equivalent. asgType = destType; } } } } if (dest == nullptr) { if (asgType == TYP_STRUCT) { dest = gtNewObjNode(structHnd, destAddr); gtSetObjGcInfo(dest->AsObj()); // Although an obj as a call argument was always assumed to be a globRef // (which is itself overly conservative), that is not true of the operands // of a block assignment. dest->gtFlags &= ~GTF_GLOB_REF; dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF); } else { dest = gtNewOperNode(GT_IND, asgType, destAddr); } } if (dest->OperIs(GT_LCL_VAR) && (src->IsMultiRegNode() || (src->OperIs(GT_RET_EXPR) && src->AsRetExpr()->gtInlineCandidate->AsCall()->HasMultiRegRetVal()))) { if (lvaEnregMultiRegVars && varTypeIsStruct(dest)) { dest->AsLclVar()->SetMultiReg(); } if (src->OperIs(GT_CALL)) { lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true; } } dest->gtFlags |= destFlags; destFlags = dest->gtFlags; // return an assignment node, to be appended GenTree* asgNode = gtNewAssignNode(dest, src); gtBlockOpInit(asgNode, dest, src, false); // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs // of assignments. if ((destFlags & GTF_DONT_CSE) == 0) { dest->gtFlags &= ~(GTF_DONT_CSE); } return asgNode; } /***************************************************************************** Given a struct value, and the class handle for that structure, return the expression for the address for that structure value. willDeref - does the caller guarantee to dereference the pointer. */ GenTree* Compiler::impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref) { assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd)); var_types type = structVal->TypeGet(); genTreeOps oper = structVal->gtOper; if (oper == GT_OBJ && willDeref) { assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd); return (structVal->AsObj()->Addr()); } else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY || structVal->OperIsSimdOrHWintrinsic()) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The 'return value' is now the temp itself type = genActualType(lvaTable[tmpNum].TypeGet()); GenTree* temp = gtNewLclvNode(tmpNum, type); temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp); return temp; } else if (oper == GT_COMMA) { assert(structVal->AsOp()->gtOp2->gtType == type); // Second thing is the struct Statement* oldLastStmt = impLastStmt; structVal->AsOp()->gtOp2 = impGetStructAddr(structVal->AsOp()->gtOp2, structHnd, curLevel, willDeref); structVal->gtType = TYP_BYREF; if (oldLastStmt != impLastStmt) { // Some temp assignment statement was placed on the statement list // for Op2, but that would be out of order with op1, so we need to // spill op1 onto the statement list after whatever was last // before we recursed on Op2 (i.e. before whatever Op2 appended). Statement* beforeStmt; if (oldLastStmt == nullptr) { // The op1 stmt should be the first in the list. beforeStmt = impStmtList; } else { // Insert after the oldLastStmt before the first inserted for op2. 
beforeStmt = oldLastStmt->GetNextStmt(); } impInsertTreeBefore(structVal->AsOp()->gtOp1, impCurStmtDI, beforeStmt); structVal->AsOp()->gtOp1 = gtNewNothingNode(); } return (structVal); } return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } //------------------------------------------------------------------------ // impNormStructType: Normalize the type of a (known to be) struct class handle. // // Arguments: // structHnd - The class handle for the struct type of interest. // pSimdBaseJitType - (optional, default nullptr) - if non-null, and the struct is a SIMD // type, set to the SIMD base JIT type // // Return Value: // The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*). // It may also modify the compFloatingPointUsed flag if the type is a SIMD type. // // Notes: // Normalizing the type involves examining the struct type to determine if it should // be modified to one that is handled specially by the JIT, possibly being a candidate // for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known // call structSizeMightRepresentSIMDType to determine if this api needs to be called. var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType) { assert(structHnd != NO_CLASS_HANDLE); var_types structType = TYP_STRUCT; #ifdef FEATURE_SIMD if (supportSIMDTypes()) { const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd); // Don't bother if the struct contains GC references of byrefs, it can't be a SIMD type. if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) == 0) { unsigned originalSize = info.compCompHnd->getClassSize(structHnd); if (structSizeMightRepresentSIMDType(originalSize)) { unsigned int sizeBytes; CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes); if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(sizeBytes == originalSize); structType = getSIMDTypeForSize(sizeBytes); if (pSimdBaseJitType != nullptr) { *pSimdBaseJitType = simdBaseJitType; } // Also indicate that we use floating point registers. compFloatingPointUsed = true; } } } } #endif // FEATURE_SIMD return structType; } //------------------------------------------------------------------------ // Compiler::impNormStructVal: Normalize a struct value // // Arguments: // structVal - the node we are going to normalize // structHnd - the class handle for the node // curLevel - the current stack level // forceNormalization - Force the creation of an OBJ node (default is false). // // Notes: // Given struct value 'structVal', make sure it is 'canonical', that is // it is either: // - a known struct type (non-TYP_STRUCT, e.g. TYP_SIMD8) // - an OBJ or a MKREFANY node, or // - a node (e.g. GT_INDEX) that will be morphed. // If the node is a CALL or RET_EXPR, a copy will be made to a new temp. // GenTree* Compiler::impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization /*=false*/) { assert(forceNormalization || varTypeIsStruct(structVal)); assert(structHnd != NO_CLASS_HANDLE); var_types structType = structVal->TypeGet(); bool makeTemp = false; if (structType == TYP_STRUCT) { structType = impNormStructType(structHnd); } bool alreadyNormalized = false; GenTreeLclVarCommon* structLcl = nullptr; genTreeOps oper = structVal->OperGet(); switch (oper) { // GT_RETURN and GT_MKREFANY don't capture the handle. 
case GT_RETURN: break; case GT_MKREFANY: alreadyNormalized = true; break; case GT_CALL: structVal->AsCall()->gtRetClsHnd = structHnd; makeTemp = true; break; case GT_RET_EXPR: structVal->AsRetExpr()->gtRetClsHnd = structHnd; makeTemp = true; break; case GT_ARGPLACE: structVal->AsArgPlace()->gtArgPlaceClsHnd = structHnd; break; case GT_INDEX: // This will be transformed to an OBJ later. alreadyNormalized = true; structVal->AsIndex()->gtStructElemClass = structHnd; structVal->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(structHnd); break; case GT_FIELD: // Wrap it in a GT_OBJ, if needed. structVal->gtType = structType; if ((structType == TYP_STRUCT) || forceNormalization) { structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } break; case GT_LCL_VAR: case GT_LCL_FLD: structLcl = structVal->AsLclVarCommon(); // Wrap it in a GT_OBJ. structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); FALLTHROUGH; case GT_OBJ: case GT_BLK: case GT_ASG: // These should already have the appropriate type. assert(structVal->gtType == structType); alreadyNormalized = true; break; case GT_IND: assert(structVal->gtType == structType); structVal = gtNewObjNode(structHnd, structVal->gtGetOp1()); alreadyNormalized = true; break; #ifdef FEATURE_SIMD case GT_SIMD: assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType)); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: assert(structVal->gtType == structType); assert(varTypeIsSIMD(structVal) || HWIntrinsicInfo::IsMultiReg(structVal->AsHWIntrinsic()->GetHWIntrinsicId())); break; #endif case GT_COMMA: { // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node. GenTree* blockNode = structVal->AsOp()->gtOp2; assert(blockNode->gtType == structType); // Is this GT_COMMA(op1, GT_COMMA())? GenTree* parent = structVal; if (blockNode->OperGet() == GT_COMMA) { // Find the last node in the comma chain. do { assert(blockNode->gtType == structType); parent = blockNode; blockNode = blockNode->AsOp()->gtOp2; } while (blockNode->OperGet() == GT_COMMA); } if (blockNode->OperGet() == GT_FIELD) { // If we have a GT_FIELD then wrap it in a GT_OBJ. blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode)); } #ifdef FEATURE_SIMD if (blockNode->OperIsSimdOrHWintrinsic()) { parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization); alreadyNormalized = true; } else #endif { noway_assert(blockNode->OperIsBlk()); // Sink the GT_COMMA below the blockNode addr. // That is GT_COMMA(op1, op2=blockNode) is tranformed into // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)). // // In case of a chained GT_COMMA case, we sink the last // GT_COMMA below the blockNode addr. 
GenTree* blockNodeAddr = blockNode->AsOp()->gtOp1; assert(blockNodeAddr->gtType == TYP_BYREF); GenTree* commaNode = parent; commaNode->gtType = TYP_BYREF; commaNode->AsOp()->gtOp2 = blockNodeAddr; blockNode->AsOp()->gtOp1 = commaNode; if (parent == structVal) { structVal = blockNode; } alreadyNormalized = true; } } break; default: noway_assert(!"Unexpected node in impNormStructVal()"); break; } structVal->gtType = structType; if (!alreadyNormalized || forceNormalization) { if (makeTemp) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The structVal is now the temp itself structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon(); structVal = structLcl; } if ((forceNormalization || (structType == TYP_STRUCT)) && !structVal->OperIsBlk()) { // Wrap it in a GT_OBJ structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } } if (structLcl != nullptr) { // A OBJ on a ADDR(LCL_VAR) can never raise an exception // so we don't set GTF_EXCEPT here. if (!lvaIsImplicitByRefLocal(structLcl->GetLclNum())) { structVal->gtFlags &= ~GTF_GLOB_REF; } } else if (structVal->OperIsBlk()) { // In general a OBJ is an indirection and could raise an exception. structVal->gtFlags |= GTF_EXCEPT; } return structVal; } /******************************************************************************/ // Given a type token, generate code that will evaluate to the correct // handle representation of that token (type handle, field handle, or method handle) // // For most cases, the handle is determined at compile-time, and the code // generated is simply an embedded handle. // // Run-time lookup is required if the enclosing method is shared between instantiations // and the token refers to formal type parameters whose instantiation is not known // at compile-time. // GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup /* = NULL */, bool mustRestoreHandle /* = false */, bool importParent /* = false */) { assert(!fgGlobalMorph); CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo); if (pRuntimeLookup) { *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup; } if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup) { switch (embedInfo.handleType) { case CORINFO_HANDLETYPE_CLASS: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle); break; case CORINFO_HANDLETYPE_METHOD: info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle); break; case CORINFO_HANDLETYPE_FIELD: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun( info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle)); break; default: break; } } // Generate the full lookup tree. May be null if we're abandoning an inline attempt. GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle); // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node. 
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG size_t handleToTrack; if (handleFlags == GTF_ICON_TOKEN_HDL) { handleToTrack = 0; } else { handleToTrack = (size_t)compileTimeHandle; } if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = handleToTrack; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = handleToTrack; } #endif return addr; } if (pLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case assert(compIsForInlining()); compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP); return nullptr; } // Need to use dictionary-based access which depends on the typeContext // which is only available at runtime, not at compile-time. return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle); } #ifdef FEATURE_READYTORUN GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE); if (pLookup->accessType == IAT_VALUE) { handle = pLookup->handle; } else if (pLookup->accessType == IAT_PVALUE) { pIndirection = pLookup->addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG assert((handleFlags == GTF_ICON_CLASS_HDL) || (handleFlags == GTF_ICON_METHOD_HDL)); if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } #endif // DEBUG return addr; } //------------------------------------------------------------------------ // impIsCastHelperEligibleForClassProbe: Checks whether a tree is a cast helper eligible to // to be profiled and then optimized with PGO data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper eligible to be profiled // bool Compiler::impIsCastHelperEligibleForClassProbe(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } 
//------------------------------------------------------------------------ // impIsCastHelperMayHaveProfileData: Checks whether a tree is a cast helper that might // have profile data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper with potential profile data // bool Compiler::impIsCastHelperMayHaveProfileData(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } GenTreeCall* Compiler::impReadyToRunHelperToTree( CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args /* = nullptr */, CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */) { CORINFO_CONST_LOOKUP lookup; if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup)) { return nullptr; } GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args); op1->setEntryPoint(lookup); return op1; } #endif GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* op1 = nullptr; switch (pCallInfo->kind) { case CORINFO_CALL: op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod); #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1->AsFptrVal()->gtEntryPoint = pCallInfo->codePointerLookup.constLookup; } #endif break; case CORINFO_CALL_CODE_POINTER: op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod); break; default: noway_assert(!"unknown call kind"); break; } return op1; } //------------------------------------------------------------------------ // getRuntimeContextTree: find pointer to context for runtime lookup. // // Arguments: // kind - lookup kind. // // Return Value: // Return GenTree pointer to generic shared context. // // Notes: // Reports about generic context using. GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind) { GenTree* ctxTree = nullptr; // Collectible types requires that for shared generic code, if we use the generic context parameter // that we report it. (This is a conservative approach, we could detect some cases particularly when the // context parameter is this that we don't need the eager reporting logic.) lvaGenericsContextInUse = true; Compiler* pRoot = impInlineRoot(); if (kind == CORINFO_LOOKUP_THISOBJ) { // this Object ctxTree = gtNewLclvNode(pRoot->info.compThisArg, TYP_REF); ctxTree->gtFlags |= GTF_VAR_CONTEXT; // context is the method table pointer of the this object ctxTree = gtNewMethodTableLookup(ctxTree); } else { assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM); // Exact method descriptor as passed in ctxTree = gtNewLclvNode(pRoot->info.compTypeCtxtArg, TYP_I_IMPL); ctxTree->gtFlags |= GTF_VAR_CONTEXT; } return ctxTree; } /*****************************************************************************/ /* Import a dictionary lookup to access a handle in code shared between generic instantiations. The lookup depends on the typeContext which is only available at runtime, and not at compile-time. 
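   Roughly speaking, the tree built here dereferences the instantiation-specific
   dictionary the required number of times; depending on the lookup it may be guarded
   by a null check (and, for dynamic dictionary expansion, a size check) that falls
   back to a helper call.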
pLookup->token1 and pLookup->token2 specify the handle that is needed. The cases are: 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the instantiation-specific handle, and the tokens to lookup the handle. 2. pLookup->indirections != CORINFO_USEHELPER : 2a. pLookup->testForNull == false : Dereference the instantiation-specific handle to get the handle. 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle. If it is non-NULL, it is the handle required. Else, call a helper to lookup the handle. */ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // It's available only via the run-time helper function if (pRuntimeLookup->indirections == CORINFO_USEHELPER) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pLookup->lookupKind); } #endif return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, ctxTree, compileTimeHandle); } // Slot pointer GenTree* slotPtrTree = ctxTree; if (pRuntimeLookup->testForNull) { slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup slot")); } GenTree* indOffTree = nullptr; GenTree* lastIndOfTree = nullptr; // Applied repeated indirections for (WORD i = 0; i < pRuntimeLookup->indirections; i++) { if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } // The last indirection could be subject to a size check (dynamic dictionary expansion) bool isLastIndirectionWithSizeCheck = ((i == pRuntimeLookup->indirections - 1) && (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)); if (i != 0) { slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!isLastIndirectionWithSizeCheck) { slotPtrTree->gtFlags |= GTF_IND_INVARIANT; } } if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree); } if (pRuntimeLookup->offsets[i] != 0) { if (isLastIndirectionWithSizeCheck) { lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL)); } } // No null test required if (!pRuntimeLookup->testForNull) { if (pRuntimeLookup->indirections == 0) { return slotPtrTree; } slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!pRuntimeLookup->testForFixup) { return slotPtrTree; } impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0")); unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test")); impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI); GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); // downcast the pointer to a TYP_INT on 64-bit targets slot = impImplicitIorI4Cast(slot, TYP_INT); // 
Use a GT_AND to check for the lowest bit and indirect if it is set GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1)); GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0)); // slot = GT_IND(slot - 1) slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL)); GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add); indir->gtFlags |= GTF_IND_NONFAULTING; indir->gtFlags |= GTF_IND_INVARIANT; slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* asg = gtNewAssignNode(slot, indir); GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg); GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon); impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); return gtNewLclvNode(slotLclNum, TYP_I_IMPL); } assert(pRuntimeLookup->indirections != 0); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1")); // Extract the handle GenTree* handleForNullCheck = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); handleForNullCheck->gtFlags |= GTF_IND_NONFAULTING; // Call the helper // - Setup argNode with the pointer to the signature returned by the lookup GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle); GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode); GenTreeCall* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs); // Check for null and possibly call helper GenTree* nullCheck = gtNewOperNode(GT_NE, TYP_INT, handleForNullCheck, gtNewIconNode(0, TYP_I_IMPL)); GenTree* handleForResult = gtCloneExpr(handleForNullCheck); GenTree* result = nullptr; if (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK) { // Dynamic dictionary expansion support assert((lastIndOfTree != nullptr) && (pRuntimeLookup->indirections > 0)); // sizeValue = dictionary[pRuntimeLookup->sizeOffset] GenTreeIntCon* sizeOffset = gtNewIconNode(pRuntimeLookup->sizeOffset, TYP_I_IMPL); GenTree* sizeValueOffset = gtNewOperNode(GT_ADD, TYP_I_IMPL, lastIndOfTree, sizeOffset); GenTree* sizeValue = gtNewOperNode(GT_IND, TYP_I_IMPL, sizeValueOffset); sizeValue->gtFlags |= GTF_IND_NONFAULTING; // sizeCheck fails if sizeValue < pRuntimeLookup->offsets[i] GenTree* offsetValue = gtNewIconNode(pRuntimeLookup->offsets[pRuntimeLookup->indirections - 1], TYP_I_IMPL); GenTree* sizeCheck = gtNewOperNode(GT_LE, TYP_INT, sizeValue, offsetValue); // revert null check condition. nullCheck->ChangeOperUnchecked(GT_EQ); // ((sizeCheck fails || nullCheck fails))) ? (helperCall : handle). // Add checks and the handle as call arguments, indirect call transformer will handle this. helperCall->gtCallArgs = gtPrependNewCallArg(handleForResult, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(sizeCheck, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(nullCheck, helperCall->gtCallArgs); result = helperCall; addExpRuntimeLookupCandidate(helperCall); } else { GenTreeColon* colonNullCheck = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, handleForResult, helperCall); result = gtNewQmarkNode(TYP_I_IMPL, nullCheck, colonNullCheck); } unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree")); impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE); return gtNewLclvNode(tmp, TYP_I_IMPL); } /****************************************************************************** * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp. 
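 * Returns true if the entry was spilled; returns false if a particular temp was
 * requested (tnum != BAD_VAR_NUM) but tnum does not name an existing local.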
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum, * else, grab a new temp. * For structs (which can be pushed on the stack using obj, etc), * special handling is needed */ struct RecursiveGuard { public: RecursiveGuard() { m_pAddress = nullptr; } ~RecursiveGuard() { if (m_pAddress) { *m_pAddress = false; } } void Init(bool* pAddress, bool bInitialize) { assert(pAddress && *pAddress == false && "Recursive guard violation"); m_pAddress = pAddress; if (bInitialize) { *m_pAddress = true; } } protected: bool* m_pAddress; }; bool Compiler::impSpillStackEntry(unsigned level, unsigned tnum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ) { #ifdef DEBUG RecursiveGuard guard; guard.Init(&impNestedStackSpill, bAssertOnRecursion); #endif GenTree* tree = verCurrentState.esStack[level].val; /* Allocate a temp if we haven't been asked to use a particular one */ if (tnum != BAD_VAR_NUM && (tnum >= lvaCount)) { return false; } bool isNewTemp = false; if (tnum == BAD_VAR_NUM) { tnum = lvaGrabTemp(true DEBUGARG(reason)); isNewTemp = true; } /* Assign the spilled entry to the temp */ impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level); // If temp is newly introduced and a ref type, grab what type info we can. if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF)) { assert(lvaTable[tnum].lvSingleDef == 0); lvaTable[tnum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tnum); CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle(); lvaSetClass(tnum, tree, stkHnd); // If we're assigning a GT_RET_EXPR, note the temp over on the call, // so the inliner can use it in case it needs a return spill temp. if (tree->OperGet() == GT_RET_EXPR) { JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum); GenTree* call = tree->AsRetExpr()->gtInlineCandidate; InlineCandidateInfo* ici = call->AsCall()->gtInlineCandidateInfo; ici->preexistingSpillTemp = tnum; } } // The tree type may be modified by impAssignTempGen, so use the type of the lclVar. var_types type = genActualType(lvaTable[tnum].TypeGet()); GenTree* temp = gtNewLclvNode(tnum, type); verCurrentState.esStack[level].val = temp; return true; } /***************************************************************************** * * Ensure that the stack has only spilled values */ void Compiler::impSpillStackEnsure(bool spillLeaves) { assert(!spillLeaves || opts.compDbgCode); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (!spillLeaves && tree->OperIsLeaf()) { continue; } // Temps introduced by the importer itself don't need to be spilled bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->AsLclVarCommon()->GetLclNum() >= info.compLocalsCount); if (isTempLcl) { continue; } impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure")); } } void Compiler::impSpillEvalStack() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack")); } } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and append the assignments to the statement list. * On return the stack is guaranteed to be empty. 
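 * (Entries without side effects are simply discarded when the stack depth is reset.)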
*/ inline void Compiler::impEvalSideEffects() { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); verCurrentState.esStackDepth = 0; } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and replace them on the stack with refs to their temps. * [0..chkLevel) is the portion of the stack which will be checked and spilled. */ inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)) { assert(chkLevel != (unsigned)CHECK_SPILL_NONE); /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */ impSpillSpecialSideEff(); if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } assert(chkLevel <= verCurrentState.esStackDepth); GenTreeFlags spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT; for (unsigned i = 0; i < chkLevel; i++) { GenTree* tree = verCurrentState.esStack[i].val; if ((tree->gtFlags & spillFlags) != 0 || (spillGlobEffects && // Only consider the following when spillGlobEffects == true !impIsAddressInLocal(tree) && // No need to spill the GT_ADDR node on a local. gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or // lvAddrTaken flag. { impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason)); } } } /***************************************************************************** * * If the stack contains any trees with special side effects in them, assign * those trees to temps and replace them on the stack with refs to their temps. */ inline void Compiler::impSpillSpecialSideEff() { // Only exception objects need to be carefully handled if (!compCurBB->bbCatchTyp) { return; } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; // Make sure if we have an exception object in the sub tree we spill ourselves. if (gtHasCatchArg(tree)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff")); } } } /***************************************************************************** * * Spill all stack references to value classes (TYP_STRUCT nodes) */ void Compiler::impSpillValueClasses() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT) { // Tree walk was aborted, which means that we found a // value class on the stack. Need to spill that // stack entry. impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses")); } } } /***************************************************************************** * * Callback that checks if a tree node is TYP_STRUCT */ Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data) { fgWalkResult walkResult = WALK_CONTINUE; if ((*pTree)->gtType == TYP_STRUCT) { // Abort the walk and indicate that we found a value class walkResult = WALK_ABORT; } return walkResult; } /***************************************************************************** * * If the stack contains any trees with references to local #lclNum, assign * those trees to temps and replace their place on the stack with refs to * their temps. 
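 * (Typically used before an assignment to the local, so that trees already on the
 * stack keep observing the local's old value.)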
*/ void Compiler::impSpillLclRefs(ssize_t lclNum) { /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */ impSpillSpecialSideEff(); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; /* If the tree may throw an exception, and the block has a handler, then we need to spill assignments to the local if the local is live on entry to the handler. Just spill 'em all without considering the liveness */ bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT)); /* Skip the tree if it doesn't have an affected reference, unless xcptnCaught */ if (xcptnCaught || gtHasRef(tree, lclNum)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs")); } } } /***************************************************************************** * * Push catch arg onto the stack. * If there are jumps to the beginning of the handler, insert basic block * and spill catch arg to a temp. Update the handler block if necessary. * * Returns the basic block of the actual handler. */ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter) { // Do not inject the basic block twice on reimport. This should be // hit only under JIT stress. See if the block is the one we injected. // Note that EH canonicalization can inject internal blocks here. We might // be able to re-use such a block (but we don't, right now). if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) == (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) { Statement* stmt = hndBlk->firstStmt(); if (stmt != nullptr) { GenTree* tree = stmt->GetRootNode(); assert(tree != nullptr); if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && (tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG)) { tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF); impPushOnStack(tree, typeInfo(TI_REF, clsHnd)); return hndBlk->bbNext; } } // If we get here, it must have been some other kind of internal block. It's possible that // someone prepended something to our injected block, but that's unlikely. } /* Push the exception address value on the stack */ GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF); /* Mark the node as having a side-effect - i.e. cannot be * moved around since it is tied to a fixed location (EAX) */ arg->gtFlags |= GTF_ORDER_SIDEEFF; #if defined(JIT32_GCENCODER) const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5); #else const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5); #endif // defined(JIT32_GCENCODER) /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */ if (hndBlk->bbRefs > 1 || forceInsertNewBlock) { if (hndBlk->bbRefs == 1) { hndBlk->bbRefs++; } /* Create extra basic block for the spill */ BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true); newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE; newBlk->inheritWeight(hndBlk); newBlk->bbCodeOffs = hndBlk->bbCodeOffs; /* Account for the new link we are about to create */ hndBlk->bbRefs++; // Spill into a temp. 
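        // The temp takes the place of GT_CATCH_ARG on the handler's entry stack;
        // bbStkTempsIn records where that incoming stack entry was spilled.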
unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg")); lvaTable[tempNum].lvType = TYP_REF; GenTree* argAsg = gtNewTempAssign(tempNum, arg); arg = gtNewLclvNode(tempNum, TYP_REF); hndBlk->bbStkTempsIn = tempNum; Statement* argStmt; if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { // Report the debug info. impImportBlockCode won't treat the actual handler as exception block and thus // won't do it for us. // TODO-DEBUGINFO: Previous code always set stack as non-empty // here. Can we not just use impCurStmtOffsSet? Are we out of sync // here with the stack? impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false)); argStmt = gtNewStmt(argAsg, impCurStmtDI); } else { argStmt = gtNewStmt(argAsg); } fgInsertStmtAtEnd(newBlk, argStmt); } impPushOnStack(arg, typeInfo(TI_REF, clsHnd)); return hndBlk; } /***************************************************************************** * * Given a tree, clone it. *pClone is set to the cloned tree. * Returns the original tree if the cloning was easy, * else returns the temp to which the tree had to be spilled to. * If the tree has side-effects, it will be spilled to a temp. */ GenTree* Compiler::impCloneExpr(GenTree* tree, GenTree** pClone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)) { if (!(tree->gtFlags & GTF_GLOB_EFFECT)) { GenTree* clone = gtClone(tree, true); if (clone) { *pClone = clone; return tree; } } /* Store the operand in a temp and return the temp */ unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which // return a struct type. It also may modify the struct type to a more // specialized type (e.g. a SIMD type). So we will get the type from // the lclVar AFTER calling impAssignTempGen(). impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtDI); var_types type = genActualType(lvaTable[temp].TypeGet()); *pClone = gtNewLclvNode(temp, type); return gtNewLclvNode(temp, type); } //------------------------------------------------------------------------ // impCreateDIWithCurrentStackInfo: Create a DebugInfo instance with the // specified IL offset and 'is call' bit, using the current stack to determine // whether to set the 'stack empty' bit. // // Arguments: // offs - the IL offset for the DebugInfo // isCall - whether the created DebugInfo should have the IsCall bit set // // Return Value: // The DebugInfo instance. // DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall) { assert(offs != BAD_IL_OFFSET); bool isStackEmpty = verCurrentState.esStackDepth <= 0; return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall)); } //------------------------------------------------------------------------ // impCurStmtOffsSet: Set the "current debug info" to attach to statements that // we are generating next. // // Arguments: // offs - the IL offset // // Remarks: // This function will be called in the main IL processing loop when it is // determined that we have reached a location in the IL stream for which we // want to report debug information. This is the main way we determine which // statements to report debug info for to the EE: for other statements, they // will have no debug information attached. 
// inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs) { if (offs == BAD_IL_OFFSET) { impCurStmtDI = DebugInfo(compInlineContext, ILLocation()); } else { impCurStmtDI = impCreateDIWithCurrentStackInfo(offs, false); } } //------------------------------------------------------------------------ // impCanSpillNow: check is it possible to spill all values from eeStack to local variables. // // Arguments: // prevOpcode - last importer opcode // // Return Value: // true if it is legal, false if it could be a sequence that we do not want to divide. bool Compiler::impCanSpillNow(OPCODE prevOpcode) { // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence. // Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed. return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ); } /***************************************************************************** * * Remember the instr offset for the statements * * When we do impAppendTree(tree), we can't set stmt->SetLastILOffset(impCurOpcOffs), * if the append was done because of a partial stack spill, * as some of the trees corresponding to code up to impCurOpcOffs might * still be sitting on the stack. * So we delay calling of SetLastILOffset() until impNoteLastILoffs(). * This should be called when an opcode finally/explicitly causes * impAppendTree(tree) to be called (as opposed to being called because of * a spill caused by the opcode) */ #ifdef DEBUG void Compiler::impNoteLastILoffs() { if (impLastILoffsStmt == nullptr) { // We should have added a statement for the current basic block // Is this assert correct ? assert(impLastStmt); impLastStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); } else { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } } #endif // DEBUG /***************************************************************************** * We don't create any GenTree (excluding spills) for a branch. * For debugging info, we need a placeholder so that we can note * the IL offset in gtStmt.gtStmtOffs. So append an empty statement. */ void Compiler::impNoteBranchOffs() { if (opts.compDbgCode) { impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } } /***************************************************************************** * Locate the next stmt boundary for which we need to record info. * We will have to spill the stack at such boundaries if it is not * already empty. * Returns the next stmt boundary (after the start of the block) */ unsigned Compiler::impInitBlockLineInfo() { /* Assume the block does not correspond with any IL offset. This prevents us from reporting extra offsets. Extra mappings can cause confusing stepping, especially if the extra mapping is a jump-target, and the debugger does not ignore extra mappings, but instead rewinds to the nearest known offset */ impCurStmtOffsSet(BAD_IL_OFFSET); IL_OFFSET blockOffs = compCurBB->bbCodeOffs; if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES)) { impCurStmtOffsSet(blockOffs); } /* Always report IL offset 0 or some tests get confused. 
Probably a good idea anyways */ if (blockOffs == 0) { impCurStmtOffsSet(blockOffs); } if (!info.compStmtOffsetsCount) { return ~0; } /* Find the lowest explicit stmt boundary within the block */ /* Start looking at an entry that is based on our instr offset */ unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize; if (index >= info.compStmtOffsetsCount) { index = info.compStmtOffsetsCount - 1; } /* If we've guessed too far, back up */ while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs) { index--; } /* If we guessed short, advance ahead */ while (info.compStmtOffsets[index] < blockOffs) { index++; if (index == info.compStmtOffsetsCount) { return info.compStmtOffsetsCount; } } assert(index < info.compStmtOffsetsCount); if (info.compStmtOffsets[index] == blockOffs) { /* There is an explicit boundary for the start of this basic block. So we will start with bbCodeOffs. Else we will wait until we get to the next explicit boundary */ impCurStmtOffsSet(blockOffs); index++; } return index; } /*****************************************************************************/ bool Compiler::impOpcodeIsCallOpcode(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: return true; default: return false; } } /*****************************************************************************/ static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: case CEE_JMP: case CEE_NEWOBJ: case CEE_NEWARR: return true; default: return false; } } /*****************************************************************************/ // One might think it is worth caching these values, but results indicate // that it isn't. // In addition, caching them causes SuperPMI to be unable to completely // encapsulate an individual method context. CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass() { CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr); return refAnyClass; } CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass() { CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE); assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr); return typeHandleClass; } CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle() { CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE); assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr); return argIteratorClass; } CORINFO_CLASS_HANDLE Compiler::impGetStringClass() { CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING); assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr); return stringClass; } CORINFO_CLASS_HANDLE Compiler::impGetObjectClass() { CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT); assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr); return objectClass; } /***************************************************************************** * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we * set its type to TYP_BYREF when we create it. 
We know if it can be * changed to TYP_I_IMPL only at the point where we use it */ /* static */ void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2) { if (tree1->IsLocalAddrExpr() != nullptr) { tree1->gtType = TYP_I_IMPL; } if (tree2 && (tree2->IsLocalAddrExpr() != nullptr)) { tree2->gtType = TYP_I_IMPL; } } /***************************************************************************** * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want * to make that an explicit cast in our trees, so any implicit casts that * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are * turned into explicit casts here. * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0) */ GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp) { var_types currType = genActualType(tree->gtType); var_types wantedType = genActualType(dstTyp); if (wantedType != currType) { // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp)) { if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->AsIntCon()->gtIconVal == 0))) { tree->gtType = TYP_I_IMPL; } } #ifdef TARGET_64BIT else if (varTypeIsI(wantedType) && (currType == TYP_INT)) { // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } else if ((wantedType == TYP_INT) && varTypeIsI(currType)) { // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT); } #endif // TARGET_64BIT } return tree; } /***************************************************************************** * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases, * but we want to make that an explicit cast in our trees, so any implicit casts * that exist in the IL are turned into explicit casts here. */ GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp) { if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType)) { tree = gtNewCastNode(dstTyp, tree, false, dstTyp); } return tree; } //------------------------------------------------------------------------ // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray // with a GT_COPYBLK node. // // Arguments: // sig - The InitializeArray signature. // // Return Value: // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or // nullptr otherwise. // // Notes: // The function recognizes the following IL pattern: // ldc <length> or a list of ldc <lower bound>/<length> // newarr or newobj // dup // ldtoken <field handle> // call InitializeArray // The lower bounds need not be constant except when the array rank is 1. // The function recognizes all kinds of arrays thus enabling a small runtime // such as CoreRT to skip providing an implementation for InitializeArray. GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 2); GenTree* fieldTokenNode = impStackTop(0).val; GenTree* arrayLocalNode = impStackTop(1).val; // // Verify that the field token is known and valid. Note that It's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
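    // By this point the ldtoken has been imported as a call to the
    // CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD helper whose argument embeds the
    // field handle as a GT_CNS_INT (possibly under a GT_IND).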
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } // // We need to get the number of elements in the array and the size of each element. // We verify that the newarr statement is exactly what we expect it to be. // If it's not then we just return NULL and we don't optimize this call // // It is possible the we don't have any statements in the block yet. if (impLastStmt == nullptr) { return nullptr; } // // We start by looking at the last statement, making sure it's an assignment, and // that the target of the assignment is the array passed to InitializeArray. // GenTree* arrayAssignment = impLastStmt->GetRootNode(); if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) || (arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != arrayLocalNode->AsLclVarCommon()->GetLclNum())) { return nullptr; } // // Make sure that the object being assigned is a helper call. // GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2; if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER)) { return nullptr; } // // Verify that it is one of the new array helpers. // bool isMDArray = false; if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8) #ifdef FEATURE_READYTORUN && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1) #endif ) { if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR)) { return nullptr; } isMDArray = true; } CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle; // // Make sure we found a compile time handle to the array // if (!arrayClsHnd) { return nullptr; } unsigned rank = 0; S_UINT32 numElements; if (isMDArray) { rank = info.compCompHnd->getArrayRank(arrayClsHnd); if (rank == 0) { return nullptr; } GenTreeCall::Use* tokenArg = newArrayCall->AsCall()->gtCallArgs; assert(tokenArg != nullptr); GenTreeCall::Use* numArgsArg = tokenArg->GetNext(); assert(numArgsArg != nullptr); GenTreeCall::Use* argsArg = numArgsArg->GetNext(); assert(argsArg != nullptr); // // The number of arguments should be a constant between 1 and 64. The rank can't be 0 // so at least one length must be present and the rank can't exceed 32 so there can // be at most 64 arguments - 32 lengths and 32 lower bounds. 
// if ((!numArgsArg->GetNode()->IsCnsIntOrI()) || (numArgsArg->GetNode()->AsIntCon()->IconValue() < 1) || (numArgsArg->GetNode()->AsIntCon()->IconValue() > 64)) { return nullptr; } unsigned numArgs = static_cast<unsigned>(numArgsArg->GetNode()->AsIntCon()->IconValue()); bool lowerBoundsSpecified; if (numArgs == rank * 2) { lowerBoundsSpecified = true; } else if (numArgs == rank) { lowerBoundsSpecified = false; // // If the rank is 1 and a lower bound isn't specified then the runtime creates // a SDArray. Note that even if a lower bound is specified it can be 0 and then // we get a SDArray as well, see the for loop below. // if (rank == 1) { isMDArray = false; } } else { return nullptr; } // // The rank is known to be at least 1 so we can start with numElements being 1 // to avoid the need to special case the first dimension. // numElements = S_UINT32(1); struct Match { static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) && (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) && (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs); } static bool IsComma(GenTree* tree) { return (tree != nullptr) && (tree->OperGet() == GT_COMMA); } }; unsigned argIndex = 0; GenTree* comma; for (comma = argsArg->GetNode(); Match::IsComma(comma); comma = comma->gtGetOp2()) { if (lowerBoundsSpecified) { // // In general lower bounds can be ignored because they're not needed to // calculate the total number of elements. But for single dimensional arrays // we need to know if the lower bound is 0 because in this case the runtime // creates a SDArray and this affects the way the array data offset is calculated. // if (rank == 1) { GenTree* lowerBoundAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2(); if (lowerBoundNode->IsIntegralConst(0)) { isMDArray = false; } } comma = comma->gtGetOp2(); argIndex++; } GenTree* lengthNodeAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lengthNode = lengthNodeAssign->gtGetOp2(); if (!lengthNode->IsCnsIntOrI()) { return nullptr; } numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue()); argIndex++; } assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs)); if (argIndex != numArgs) { return nullptr; } } else { // // Make sure there are exactly two arguments: the array class and // the number of elements. 
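        // (The ready-to-run new-array helper takes the length as its first argument,
        // while the regular helpers take the class handle first and the length second.)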
// GenTree* arrayLengthNode; GenTreeCall::Use* args = newArrayCall->AsCall()->gtCallArgs; #ifdef FEATURE_READYTORUN if (newArrayCall->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)) { // Array length is 1st argument for readytorun helper arrayLengthNode = args->GetNode(); } else #endif { // Array length is 2nd argument for regular helper arrayLengthNode = args->GetNext()->GetNode(); } // // This optimization is only valid for a constant array size. // if (arrayLengthNode->gtOper != GT_CNS_INT) { return nullptr; } numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal); if (!info.compCompHnd->isSDArray(arrayClsHnd)) { return nullptr; } } CORINFO_CLASS_HANDLE elemClsHnd; var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd)); // // Note that genTypeSize will return zero for non primitive types, which is exactly // what we want (size will then be 0, and we will catch this in the conditional below). // Note that we don't expect this to fail for valid binaries, so we assert in the // non-verification case (the verification case should not assert but rather correctly // handle bad binaries). This assert is not guarding any specific invariant, but rather // saying that we don't expect this to happen, and if it is hit, we need to investigate // why. // S_UINT32 elemSize(genTypeSize(elementType)); S_UINT32 size = elemSize * S_UINT32(numElements); if (size.IsOverflow()) { return nullptr; } if ((size.Value() == 0) || (varTypeIsGC(elementType))) { return nullptr; } void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value()); if (!initData) { return nullptr; } // // At this point we are ready to commit to implementing the InitializeArray // intrinsic using a struct assignment. Pop the arguments from the stack and // return the struct assignment node. // impPopStack(); impPopStack(); const unsigned blkSize = size.Value(); unsigned dataOffset; if (isMDArray) { dataOffset = eeGetMDArrayDataOffset(rank); } else { dataOffset = eeGetArrayDataOffset(); } GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL)); GenTree* dst = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize)); GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true); #ifdef DEBUG src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_IntializeArrayIntrinsics; #endif return gtNewBlkOpNode(dst, // dst src, // src false, // volatile true); // copyBlock } GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 1); assert(sig->sigInst.methInstCount == 1); GenTree* fieldTokenNode = impStackTop(0).val; // // Verify that the field token is known and valid. Note that it's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } CORINFO_CLASS_HANDLE fieldOwnerHnd = info.compCompHnd->getFieldClass(fieldToken); CORINFO_CLASS_HANDLE fieldClsHnd; var_types fieldElementType = JITtype2varType(info.compCompHnd->getFieldType(fieldToken, &fieldClsHnd, fieldOwnerHnd)); unsigned totalFieldSize; // Most static initialization data fields are of some structure, but it is possible for them to be of various // primitive types as well if (fieldElementType == var_types::TYP_STRUCT) { totalFieldSize = info.compCompHnd->getClassSize(fieldClsHnd); } else { totalFieldSize = genTypeSize(fieldElementType); } // Limit to primitive or enum type - see ArrayNative::GetSpanDataFrom() CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0]; if (info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd) == CORINFO_TYPE_UNDEF) { return nullptr; } const unsigned targetElemSize = info.compCompHnd->getClassSize(targetElemHnd); assert(targetElemSize != 0); const unsigned count = totalFieldSize / targetElemSize; if (count == 0) { return nullptr; } void* data = info.compCompHnd->getArrayInitializationData(fieldToken, totalFieldSize); if (!data) { return nullptr; } // // Ready to commit to the work // impPopStack(); // Turn count and pointer value into constants. GenTree* lengthValue = gtNewIconNode(count, TYP_INT); GenTree* pointerValue = gtNewIconHandleNode((size_t)data, GTF_ICON_CONST_PTR); // Construct ReadOnlySpan<T> to return. CORINFO_CLASS_HANDLE spanHnd = sig->retTypeClass; unsigned spanTempNum = lvaGrabTemp(true DEBUGARG("ReadOnlySpan<T> for CreateSpan<T>")); lvaSetStruct(spanTempNum, spanHnd, false); CORINFO_FIELD_HANDLE pointerFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 0); CORINFO_FIELD_HANDLE lengthFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 1); GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 0); pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd)); GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue); GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE); lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd)); GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue); // Now append a few statements the initialize the span impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // And finally create a tree that points at the span. 
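    // The caller receives the span temp itself (a TYP_STRUCT local); its pointer and
    // length fields were assigned by the statements appended above.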
return impCreateLocalNode(spanTempNum DEBUGARG(0)); } //------------------------------------------------------------------------ // impIntrinsic: possibly expand intrinsic call into alternate IR sequence // // Arguments: // newobjThis - for constructor calls, the tree for the newly allocated object // clsHnd - handle for the intrinsic method's class // method - handle for the intrinsic method // sig - signature of the intrinsic method // methodFlags - CORINFO_FLG_XXX flags of the intrinsic method // memberRef - the token for the intrinsic method // readonlyCall - true if call has a readonly prefix // tailCall - true if call is in tail position // pConstrainedResolvedToken -- resolved token for constrained call, or nullptr // if call is not constrained // constraintCallThisTransform -- this transform to apply for a constrained call // pIntrinsicName [OUT] -- intrinsic name (see enumeration in namedintrinsiclist.h) // for "traditional" jit intrinsics // isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call // that is amenable to special downstream optimization opportunities // // Returns: // IR tree to use in place of the call, or nullptr if the jit should treat // the intrinsic call like a normal call. // // pIntrinsicName set to non-illegal value if the call is recognized as a // traditional jit intrinsic, even if the intrinsic is not expaned. // // isSpecial set true if the expansion is subject to special // optimizations later in the jit processing // // Notes: // On success the IR tree may be a call to a different method or an inline // sequence. If it is a call, then the intrinsic processing here is responsible // for handling all the special cases, as upon return to impImportCall // expanded intrinsics bypass most of the normal call processing. // // Intrinsics are generally not recognized in minopts and debug codegen. // // However, certain traditional intrinsics are identifed as "must expand" // if there is no fallback implmentation to invoke; these must be handled // in all codegen modes. // // New style intrinsics (where the fallback implementation is in IL) are // identified as "must expand" if they are invoked from within their // own method bodies. // GenTree* Compiler::impIntrinsic(GenTree* newobjThis, CORINFO_CLASS_HANDLE clsHnd, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, unsigned methodFlags, int memberRef, bool readonlyCall, bool tailCall, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM constraintCallThisTransform, NamedIntrinsic* pIntrinsicName, bool* isSpecialIntrinsic) { assert((methodFlags & CORINFO_FLG_INTRINSIC) != 0); bool mustExpand = false; bool isSpecial = false; NamedIntrinsic ni = NI_Illegal; if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0) { // The recursive non-virtual calls to Jit intrinsics are must-expand by convention. mustExpand = mustExpand || (gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL)); ni = lookupNamedIntrinsic(method); // We specially support the following on all platforms to allow for dead // code optimization and to more generally support recursive intrinsics. 
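        // For example, a guard such as "if (SomeIsa.IsSupported)" (illustrative name)
        // folds to a constant here, which allows the dead branch to be removed.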
if (ni == NI_IsSupported_True) { assert(sig->numArgs == 0); return gtNewIconNode(true); } if (ni == NI_IsSupported_False) { assert(sig->numArgs == 0); return gtNewIconNode(false); } if (ni == NI_Throw_PlatformNotSupportedException) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } #ifdef FEATURE_HW_INTRINSICS if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END)) { GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand); if (mustExpand && (hwintrinsic == nullptr)) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand); } return hwintrinsic; } if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END)) { // These intrinsics aren't defined recursively and so they will never be mustExpand // Instead, they provide software fallbacks that will be executed instead. assert(!mustExpand); return impSimdAsHWIntrinsic(ni, clsHnd, method, sig, newobjThis); } #endif // FEATURE_HW_INTRINSICS } *pIntrinsicName = ni; if (ni == NI_System_StubHelpers_GetStubContext) { // must be done regardless of DbgCode and MinOpts return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL); } if (ni == NI_System_StubHelpers_NextCallReturnAddress) { // For now we just avoid inlining anything into these methods since // this intrinsic is only rarely used. We could do this better if we // wanted to by trying to match which call is the one we need to get // the return address of. info.compHasNextCallRetAddr = true; return new (this, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); } switch (ni) { // CreateSpan must be expanded for NativeAOT case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: mustExpand |= IsTargetAbi(CORINFO_CORERT_ABI); break; case NI_System_ByReference_ctor: case NI_System_ByReference_get_Value: case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: mustExpand = true; break; default: break; } GenTree* retNode = nullptr; // Under debug and minopts, only expand what is required. // NextCallReturnAddress intrinsic returns the return address of the next call. // If that call is an intrinsic and is expanded, codegen for NextCallReturnAddress will fail. // To avoid that we conservatively expand only required intrinsics in methods that call // the NextCallReturnAddress intrinsic. 
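    // Note that must-expand intrinsics are still expanded below even when
    // optimizations are disabled.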
if (!mustExpand && (opts.OptimizationDisabled() || info.compHasNextCallRetAddr)) { *pIntrinsicName = NI_Illegal; return retNode; } CorInfoType callJitType = sig->retType; var_types callType = JITtype2varType(callJitType); /* First do the intrinsics which are always smaller than a call */ if (ni != NI_Illegal) { assert(retNode == nullptr); switch (ni) { case NI_Array_Address: case NI_Array_Get: case NI_Array_Set: retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, ni); break; case NI_System_String_Equals: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_MemoryExtensions_Equals: case NI_System_MemoryExtensions_SequenceEqual: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_String_StartsWith: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_StartsWith: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_AsSpan: case NI_System_String_op_Implicit: { assert(sig->numArgs == 1); isSpecial = impStackTop().val->OperIs(GT_CNS_STR); break; } case NI_System_String_get_Chars: { GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; retNode = gtNewIndexRef(TYP_USHORT, op1, op2); retNode->gtFlags |= GTF_INX_STRING_LAYOUT; break; } case NI_System_String_get_Length: { GenTree* op1 = impPopStack().val; if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { retNode = iconNode; break; } } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen, compCurBB); op1 = arrLen; // Getting the length of a null string should throw op1->gtFlags |= GTF_EXCEPT; retNode = op1; break; } // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field // in a value type. The canonical example of this is Span<T>. In effect this is just a // substitution. The parameter byref will be assigned into the newly allocated object. case NI_System_ByReference_ctor: { // Remove call to constructor and directly assign the byref passed // to the call to the first slot of the ByReference struct. GenTree* op1 = impPopStack().val; GenTree* thisptr = newobjThis; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0); GenTree* assign = gtNewAssignNode(field, op1); GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1()); assert(byReferenceStruct != nullptr); impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd)); retNode = assign; break; } // Implement ptr value getter for ByReference struct. case NI_System_ByReference_get_Value: { GenTree* op1 = impPopStack().val; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0); retNode = field; break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: { retNode = impCreateSpanIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: { retNode = impInitializeArrayIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant: { GenTree* op1 = impPopStack().val; if (op1->OperIsConst()) { // op1 is a known constant, replace with 'true'. 
retNode = gtNewIconNode(1); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to true early\n"); // We can also consider FTN_ADDR and typeof(T) here } else { // op1 is not a known constant, we'll do the expansion in morph retNode = new (this, GT_INTRINSIC) GenTreeIntrinsic(TYP_INT, op1, ni, method); JITDUMP("\nConverting RuntimeHelpers.IsKnownConstant to:\n"); DISPTREE(retNode); } break; } case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: { assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it. CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = memberRef; resolvedToken.tokenType = CORINFO_TOKENKIND_Method; CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo); GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef), embedInfo.compileTimeHandle); if (rawHandle == nullptr) { return nullptr; } noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE); GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL); GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar); var_types resultType = JITtype2varType(sig->retType); retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr); break; } case NI_System_Span_get_Item: case NI_System_ReadOnlySpan_get_Item: { // Have index, stack pointer-to Span<T> s on the stack. Expand to: // // For Span<T> // Comma // BoundsCheck(index, s->_length) // s->_pointer + index * sizeof(T) // // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref // // Signature should show one class type parameter, which // we need to examine. assert(sig->sigInst.classInstCount == 1); assert(sig->numArgs == 1); CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0]; const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd); assert(elemSize > 0); const bool isReadOnly = (ni == NI_System_ReadOnlySpan_get_Item); JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "", info.compCompHnd->getClassName(spanElemHnd), elemSize); GenTree* index = impPopStack().val; GenTree* ptrToSpan = impPopStack().val; GenTree* indexClone = nullptr; GenTree* ptrToSpanClone = nullptr; assert(genActualType(index) == TYP_INT); assert(ptrToSpan->TypeGet() == TYP_BYREF); #if defined(DEBUG) if (verbose) { printf("with ptr-to-span\n"); gtDispTree(ptrToSpan); printf("and index\n"); gtDispTree(index); } #endif // defined(DEBUG) // We need to use both index and ptr-to-span twice, so clone or spill. 
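// impCloneExpr hands back the tree to use for the first reference and a duplicate (or a
// temp, if spilling was needed) via the out parameter for the second reference.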
index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item index")); ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item ptrToSpan")); // Bounds check CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1); const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd); GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset); GenTree* boundsCheck = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, length, SCK_RNGCHK_FAIL); // Element access index = indexClone; #ifdef TARGET_64BIT if (index->OperGet() == GT_CNS_INT) { index->gtType = TYP_I_IMPL; } else { index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL); } #endif if (elemSize != 1) { GenTree* sizeofNode = gtNewIconNode(static_cast<ssize_t>(elemSize), TYP_I_IMPL); index = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, sizeofNode); } CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd); GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset); GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, index); // Prepare result var_types resultType = JITtype2varType(sig->retType); assert(resultType == result->TypeGet()); retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result); break; } case NI_System_RuntimeTypeHandle_GetValueInternal: { GenTree* op1 = impStackTop(0).val; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall())) { // Old tree // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle // // New tree // TreeToGetNativeTypeHandle // Remove call to helper and return the native TypeHandle pointer that was the parameter // to that helper. op1 = impPopStack().val; // Get native TypeHandle argument to old helper GenTreeCall::Use* arg = op1->AsCall()->gtCallArgs; assert(arg->GetNext() == nullptr); op1 = arg->GetNode(); retNode = op1; } // Call the regular function. break; } case NI_System_Type_GetTypeFromHandle: { GenTree* op1 = impStackTop(0).val; CorInfoHelpFunc typeHandleHelper; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper)) { op1 = impPopStack().val; // Replace helper with a more specialized helper that returns RuntimeType if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE) { typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE; } else { assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL); typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL; } assert(op1->AsCall()->gtCallArgs->GetNext() == nullptr); op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->AsCall()->gtCallArgs); op1->gtType = TYP_REF; retNode = op1; } break; } case NI_System_Type_op_Equality: case NI_System_Type_op_Inequality: { JITDUMP("Importing Type.op_*Equality intrinsic\n"); GenTree* op1 = impStackTop(1).val; GenTree* op2 = impStackTop(0).val; GenTree* optTree = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2); if (optTree != nullptr) { // Success, clean up the evaluation stack. impPopStack(); impPopStack(); // See if we can optimize even further, to a handle compare. optTree = gtFoldTypeCompare(optTree); // See if we can now fold a handle compare to a constant. 
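// (e.g. typeof(int) == typeof(string) can collapse all the way to a constant here, so no
// runtime type objects are ever materialized for the comparison.)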
optTree = gtFoldExpr(optTree); retNode = optTree; } else { // Retry optimizing these later isSpecial = true; } break; } case NI_System_Enum_HasFlag: { GenTree* thisOp = impStackTop(1).val; GenTree* flagOp = impStackTop(0).val; GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp); if (optTree != nullptr) { // Optimization successful. Pop the stack for real. impPopStack(); impPopStack(); retNode = optTree; } else { // Retry optimizing this during morph. isSpecial = true; } break; } case NI_System_Type_IsAssignableFrom: { GenTree* typeTo = impStackTop(1).val; GenTree* typeFrom = impStackTop(0).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_IsAssignableTo: { GenTree* typeTo = impStackTop(0).val; GenTree* typeFrom = impStackTop(1).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_get_IsValueType: { // Optimize // // call Type.GetTypeFromHandle (which is replaced with CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) // call Type.IsValueType // // to `true` or `false` // e.g. `typeof(int).IsValueType` => `true` if (impStackTop().val->IsCall()) { GenTreeCall* call = impStackTop().val->AsCall(); if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE)) { CORINFO_CLASS_HANDLE hClass = gtGetHelperArgClassHandle(call->gtCallArgs->GetNode()); if (hClass != NO_CLASS_HANDLE) { retNode = gtNewIconNode((eeIsValueClass(hClass) && // pointers are not value types (e.g. typeof(int*).IsValueType is false) info.compCompHnd->asCorInfoType(hClass) != CORINFO_TYPE_PTR) ? 1 : 0); impPopStack(); // drop CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE call } } } break; } case NI_System_Threading_Thread_get_ManagedThreadId: { if (impStackTop().val->OperIs(GT_RET_EXPR)) { GenTreeCall* call = impStackTop().val->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { if (lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Threading_Thread_get_CurrentThread) { // drop get_CurrentThread() call impPopStack(); call->ReplaceWith(gtNewNothingNode(), this); retNode = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT); } } } break; } #ifdef TARGET_ARM64 // Intrinsify Interlocked.Or and Interlocked.And only for arm64-v8.1 (and newer) // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). case NI_System_Threading_Interlocked_Or: case NI_System_Threading_Interlocked_And: { if (compOpportunisticallyDependsOn(InstructionSet_Atomics)) { assert(sig->numArgs == 2); GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; genTreeOps op = (ni == NI_System_Threading_Interlocked_Or) ? 
GT_XORR : GT_XAND; retNode = gtNewOperNode(op, genActualType(callType), op1, op2); retNode->gtFlags |= GTF_GLOB_REF | GTF_ASG; } break; } #endif // TARGET_ARM64 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic case NI_System_Threading_Interlocked_CompareExchange: { var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } assert(callType != TYP_STRUCT); assert(sig->numArgs == 3); GenTree* op3 = impPopStack().val; // comparand GenTree* op2 = impPopStack().val; // value GenTree* op1 = impPopStack().val; // location GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3); node->AsCmpXchg()->gtOpLocation->gtFlags |= GTF_DONT_CSE; retNode = node; break; } case NI_System_Threading_Interlocked_Exchange: case NI_System_Threading_Interlocked_ExchangeAdd: { assert(callType != TYP_STRUCT); assert(sig->numArgs == 2); var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; // This creates: // val // XAdd // addr // field (for example) // // In the case where the first argument is the address of a local, we might // want to make this *not* make the var address-taken -- but atomic instructions // on a local are probably pretty useless anyway, so we probably don't care. op1 = gtNewOperNode(ni == NI_System_Threading_Interlocked_ExchangeAdd ? GT_XADD : GT_XCHG, genActualType(callType), op1, op2); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; retNode = op1; break; } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) case NI_System_Threading_Interlocked_MemoryBarrier: case NI_System_Threading_Interlocked_ReadMemoryBarrier: { assert(sig->numArgs == 0); GenTree* op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; // On XARCH `NI_System_Threading_Interlocked_ReadMemoryBarrier` fences need not be emitted. // However, we still need to capture the effect on reordering. 
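// The GTF_MEMORYBARRIER_LOAD flag set below records that only load ordering is required,
// letting codegen skip the fence instruction on targets where loads are already ordered.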
if (ni == NI_System_Threading_Interlocked_ReadMemoryBarrier) { op1->gtFlags |= GTF_MEMORYBARRIER_LOAD; } retNode = op1; break; } #ifdef FEATURE_HW_INTRINSICS case NI_System_Math_FusedMultiplyAdd: { #ifdef TARGET_XARCH if (compExactlyDependsOn(InstructionSet_FMA) && supportSIMDTypes()) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return FMA.MultiplyAddScalar( // Vector128.CreateScalarUnsafe(x), // Vector128.CreateScalarUnsafe(y), // Vector128.CreateScalarUnsafe(z) // ).ToScalar(); GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* res = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callJitType, 16); retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callJitType, 16); break; } #elif defined(TARGET_ARM64) if (compExactlyDependsOn(InstructionSet_AdvSimd)) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return AdvSimd.FusedMultiplyAddScalar( // Vector64.Create{ScalarUnsafe}(z), // Vector64.Create{ScalarUnsafe}(y), // Vector64.Create{ScalarUnsafe}(x) // ).ToScalar(); NamedIntrinsic createVector64 = (callType == TYP_DOUBLE) ? NI_Vector64_Create : NI_Vector64_CreateScalarUnsafe; constexpr unsigned int simdSize = 8; GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); // Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3 // while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3 retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar, callJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callJitType, simdSize); break; } #endif // TODO-CQ-XArch: Ideally we would create a GT_INTRINSIC node for fma, however, that currently // requires more extensive changes to valuenum to support methods with 3 operands // We want to generate a GT_INTRINSIC node in the case the call can't be treated as // a target intrinsic so that we can still benefit from CSE and constant folding. 
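// When neither the FMA nor the AdvSimd path above applies, retNode stays null and the
// call is imported as an ordinary call.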
break; } #endif // FEATURE_HW_INTRINSICS case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: #ifdef TARGET_ARM64 // ARM64 has fmax/fmin which are IEEE754:2019 minimum/maximum compatible // TODO-XARCH-CQ: Enable this for XARCH when one of the arguments is a constant // so we can then emit maxss/minss and avoid NaN/-0.0 handling case NI_System_Math_Max: case NI_System_Math_Min: #endif case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { retNode = impMathIntrinsic(method, sig, callType, ni, tailCall); break; } case NI_System_Array_Clone: case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: case NI_System_Object_MemberwiseClone: case NI_System_Threading_Thread_get_CurrentThread: { // Flag for later handling. isSpecial = true; break; } case NI_System_Object_GetType: { JITDUMP("\n impIntrinsic: call to Object.GetType\n"); GenTree* op1 = impStackTop(0).val; // If we're calling GetType on a boxed value, just get the type directly. if (op1->IsBoxedValue()) { JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n"); // Try and clean up the box. Obtain the handle we // were going to pass to the newobj. GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE); if (boxTypeHandle != nullptr) { // Note we don't need to play the TYP_STRUCT games here like // do for LDTOKEN since the return value of this operator is Type, // not RuntimeTypeHandle. impPopStack(); GenTreeCall::Use* helperArgs = gtNewCallArgs(boxTypeHandle); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } // If we have a constrained callvirt with a "box this" transform // we know we have a value class and hence an exact type. // // If so, instead of boxing and then extracting the type, just // construct the type directly. if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) && (constraintCallThisTransform == CORINFO_BOX_THIS)) { // Ensure this is one of the is simple box cases (in particular, rule out nullables). 
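// getBoxHelper returns CORINFO_HELP_BOX only for ordinary value types; Nullable<T> boxes
// go through a different helper, so the check below filters those out.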
const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass); const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX); if (isSafeToOptimize) { JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n"); impPopStack(); GenTree* typeHandleOp = impTokenToHandle(pConstrainedResolvedToken, nullptr, true /* mustRestoreHandle */); if (typeHandleOp == nullptr) { assert(compDonotInline()); return nullptr; } GenTreeCall::Use* helperArgs = gtNewCallArgs(typeHandleOp); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } #ifdef DEBUG if (retNode != nullptr) { JITDUMP("Optimized result for call to GetType is\n"); if (verbose) { gtDispTree(retNode); } } #endif // Else expand as an intrinsic, unless the call is constrained, // in which case we defer expansion to allow impImportCall do the // special constraint processing. if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr)) { JITDUMP("Expanding as special intrinsic\n"); impPopStack(); op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, ni, method); // Set the CALL flag to indicate that the operator is implemented by a call. // Set also the EXCEPTION flag because the native implementation of // NI_System_Object_GetType intrinsic can throw NullReferenceException. op1->gtFlags |= (GTF_CALL | GTF_EXCEPT); retNode = op1; // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } if (retNode == nullptr) { JITDUMP("Leaving as normal call\n"); // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } break; } case NI_System_Array_GetLength: case NI_System_Array_GetLowerBound: case NI_System_Array_GetUpperBound: { // System.Array.GetLength(Int32) method: // public int GetLength(int dimension) // System.Array.GetLowerBound(Int32) method: // public int GetLowerBound(int dimension) // System.Array.GetUpperBound(Int32) method: // public int GetUpperBound(int dimension) // // Only implement these as intrinsics for multi-dimensional arrays. // Only handle constant dimension arguments. GenTree* gtDim = impStackTop().val; GenTree* gtArr = impStackTop(1).val; if (gtDim->IsIntegralConst()) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE arrCls = gtGetClassHandle(gtArr, &isExact, &isNonNull); if (arrCls != NO_CLASS_HANDLE) { unsigned rank = info.compCompHnd->getArrayRank(arrCls); if ((rank > 1) && !info.compCompHnd->isSDArray(arrCls)) { // `rank` is guaranteed to be <=32 (see MAX_RANK in vm\array.h). Any constant argument // is `int` sized. INT64 dimValue = gtDim->AsIntConCommon()->IntegralValue(); assert((unsigned int)dimValue == dimValue); unsigned dim = (unsigned int)dimValue; if (dim < rank) { // This is now known to be a multi-dimension array with a constant dimension // that is in range; we can expand it as an intrinsic. impPopStack().val; // Pop the dim and array object; we already have a pointer to them. impPopStack().val; // Make sure there are no global effects in the array (such as it being a function // call), so we can mark the generated indirection with GTF_IND_INVARIANT. In the // GetUpperBound case we need the cloned object, since we refer to the array // object twice. In the other cases, we don't need to clone. 
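// gtArrClone stays null unless we actually clone; the GetUpperBound case asserts that it
// is present because that expansion reads the array object twice.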
GenTree* gtArrClone = nullptr; if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound)) { gtArr = impCloneExpr(gtArr, &gtArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("MD intrinsics array")); } switch (ni) { case NI_System_Array_GetLength: { // Generate *(array + offset-to-length-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLengthOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetLowerBound: { // Generate *(array + offset-to-bounds-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetUpperBound: { assert(gtArrClone != nullptr); // Generate: // *(array + offset-to-length-array + sizeof(int) * dim) + // *(array + offset-to-bounds-array + sizeof(int) * dim) - 1 unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); GenTree* gtLowerBound = gtNewIndir(TYP_INT, gtAddr); gtLowerBound->gtFlags |= GTF_IND_INVARIANT; offs = eeGetMDArrayLengthOffset(rank, dim); gtOffs = gtNewIconNode(offs, TYP_I_IMPL); gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArrClone, gtOffs); GenTree* gtLength = gtNewIndir(TYP_INT, gtAddr); gtLength->gtFlags |= GTF_IND_INVARIANT; GenTree* gtSum = gtNewOperNode(GT_ADD, TYP_INT, gtLowerBound, gtLength); GenTree* gtOne = gtNewIconNode(1, TYP_INT); retNode = gtNewOperNode(GT_SUB, TYP_INT, gtSum, gtOne); break; } default: unreached(); } } } } } break; } case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness: { assert(sig->numArgs == 1); // We expect the return type of the ReverseEndianness routine to match the type of the // one and only argument to the method. We use a special instruction for 16-bit // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally, // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a // 64-bit byte swap on a 32-bit arch, we'll fall to the default case in the switch block below. switch (sig->retType) { case CorInfoType::CORINFO_TYPE_SHORT: case CorInfoType::CORINFO_TYPE_USHORT: retNode = gtNewCastNode(TYP_INT, gtNewOperNode(GT_BSWAP16, TYP_INT, impPopStack().val), false, callType); break; case CorInfoType::CORINFO_TYPE_INT: case CorInfoType::CORINFO_TYPE_UINT: #ifdef TARGET_64BIT case CorInfoType::CORINFO_TYPE_LONG: case CorInfoType::CORINFO_TYPE_ULONG: #endif // TARGET_64BIT retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val); break; default: // This default case gets hit on 32-bit archs when a call to a 64-bit overload // of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard // method call, where the implementation decomposes the operation into two 32-bit // bswap routines. If the input to the 64-bit function is a constant, then we rely // on inlining + constant folding of 32-bit bswaps to effectively constant fold // the 64-bit call site. 
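// (For example, a 64-bit ReverseEndianness on a 32-bit target stays a call into the
// managed implementation, which performs two 32-bit swaps.)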
break; } break; } // Fold PopCount for constant input case NI_System_Numerics_BitOperations_PopCount: { assert(sig->numArgs == 1); if (impStackTop().val->IsIntegralConst()) { typeInfo argType = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); INT64 cns = impPopStack().val->AsIntConCommon()->IntegralValue(); if (argType.IsType(TI_LONG)) { retNode = gtNewIconNode(genCountBits(cns), callType); } else { assert(argType.IsType(TI_INT)); retNode = gtNewIconNode(genCountBits(static_cast<unsigned>(cns)), callType); } } break; } case NI_System_GC_KeepAlive: { retNode = impKeepAliveIntrinsic(impPopStack().val); break; } default: break; } } if (mustExpand && (retNode == nullptr)) { assert(!"Unhandled must expand intrinsic, throwing PlatformNotSupportedException"); return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } // Optionally report if this intrinsic is special // (that is, potentially re-optimizable during morph). if (isSpecialIntrinsic != nullptr) { *isSpecialIntrinsic = isSpecial; } return retNode; } GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) { // Optimize patterns like: // // typeof(TTo).IsAssignableFrom(typeof(TTFrom)) // valueTypeVar.GetType().IsAssignableFrom(typeof(TTFrom)) // typeof(TTFrom).IsAssignableTo(typeof(TTo)) // typeof(TTFrom).IsAssignableTo(valueTypeVar.GetType()) // // to true/false if (typeTo->IsCall() && typeFrom->IsCall()) { // make sure both arguments are `typeof()` CORINFO_METHOD_HANDLE hTypeof = eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE); if ((typeTo->AsCall()->gtCallMethHnd == hTypeof) && (typeFrom->AsCall()->gtCallMethHnd == hTypeof)) { CORINFO_CLASS_HANDLE hClassTo = gtGetHelperArgClassHandle(typeTo->AsCall()->gtCallArgs->GetNode()); CORINFO_CLASS_HANDLE hClassFrom = gtGetHelperArgClassHandle(typeFrom->AsCall()->gtCallArgs->GetNode()); if (hClassTo == NO_CLASS_HANDLE || hClassFrom == NO_CLASS_HANDLE) { return nullptr; } TypeCompareState castResult = info.compCompHnd->compareTypesForCast(hClassFrom, hClassTo); if (castResult == TypeCompareState::May) { // requires runtime check // e.g. __Canon, COMObjects, Nullable return nullptr; } GenTreeIntCon* retNode = gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0); impPopStack(); // drop both CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE calls impPopStack(); return retNode; } } return nullptr; } GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall) { GenTree* op1; GenTree* op2; assert(callType != TYP_STRUCT); assert(IsMathIntrinsic(intrinsicName)); op1 = nullptr; #if !defined(TARGET_X86) // Intrinsics that are not implemented directly by target instructions will // be re-materialized as users calls in rationalizer. For prefixed tail calls, // don't do this optimization, because // a) For back compatibility reasons on desktop .NET Framework 4.6 / 4.6.1 // b) It will be non-trivial task or too late to re-materialize a surviving // tail prefixed GT_INTRINSIC as tail call in rationalizer. if (!IsIntrinsicImplementedByUserCall(intrinsicName) || !tailCall) #else // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad // code generation for certain EH constructs. 
if (!IsIntrinsicImplementedByUserCall(intrinsicName)) #endif { CORINFO_CLASS_HANDLE tmpClass; CORINFO_ARG_LIST_HANDLE arg; var_types op1Type; var_types op2Type; switch (sig->numArgs) { case 1: op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicName, method); break; case 2: op2 = impPopStack().val; op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } arg = info.compCompHnd->getArgNext(arg); op2Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op2->TypeGet() != genActualType(op2Type)) { assert(varTypeIsFloating(op2)); op2 = gtNewCastNode(callType, op2, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicName, method); break; default: NO_WAY("Unsupported number of args for Math Intrinsic"); } if (IsIntrinsicImplementedByUserCall(intrinsicName)) { op1->gtFlags |= GTF_CALL; } } return op1; } //------------------------------------------------------------------------ // lookupNamedIntrinsic: map method to jit named intrinsic value // // Arguments: // method -- method handle for method // // Return Value: // Id for the named intrinsic, or Illegal if none. // // Notes: // method should have CORINFO_FLG_INTRINSIC set in its attributes, // otherwise it is not a named jit intrinsic. 
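//    The lookup below matches purely on namespace/class/method names; signature checks
//    happen later, when the intrinsic is actually expanded.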
// NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) { const char* className = nullptr; const char* namespaceName = nullptr; const char* enclosingClassName = nullptr; const char* methodName = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName, &enclosingClassName); JITDUMP("Named Intrinsic "); if (namespaceName != nullptr) { JITDUMP("%s.", namespaceName); } if (enclosingClassName != nullptr) { JITDUMP("%s.", enclosingClassName); } if (className != nullptr) { JITDUMP("%s.", className); } if (methodName != nullptr) { JITDUMP("%s", methodName); } if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr)) { // Check if we are dealing with an MD array's known runtime method CorInfoArrayIntrinsic arrayFuncIndex = info.compCompHnd->getArrayIntrinsicID(method); switch (arrayFuncIndex) { case CorInfoArrayIntrinsic::GET: JITDUMP("ARRAY_FUNC_GET: Recognized\n"); return NI_Array_Get; case CorInfoArrayIntrinsic::SET: JITDUMP("ARRAY_FUNC_SET: Recognized\n"); return NI_Array_Set; case CorInfoArrayIntrinsic::ADDRESS: JITDUMP("ARRAY_FUNC_ADDRESS: Recognized\n"); return NI_Array_Address; default: break; } JITDUMP(": Not recognized, not enough metadata\n"); return NI_Illegal; } JITDUMP(": "); NamedIntrinsic result = NI_Illegal; if (strcmp(namespaceName, "System") == 0) { if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0)) { result = NI_System_Enum_HasFlag; } else if (strcmp(className, "Activator") == 0) { if (strcmp(methodName, "AllocatorOf") == 0) { result = NI_System_Activator_AllocatorOf; } else if (strcmp(methodName, "DefaultConstructorOf") == 0) { result = NI_System_Activator_DefaultConstructorOf; } } else if (strcmp(className, "ByReference`1") == 0) { if (strcmp(methodName, ".ctor") == 0) { result = NI_System_ByReference_ctor; } else if (strcmp(methodName, "get_Value") == 0) { result = NI_System_ByReference_get_Value; } } else if (strcmp(className, "Math") == 0 || strcmp(className, "MathF") == 0) { if (strcmp(methodName, "Abs") == 0) { result = NI_System_Math_Abs; } else if (strcmp(methodName, "Acos") == 0) { result = NI_System_Math_Acos; } else if (strcmp(methodName, "Acosh") == 0) { result = NI_System_Math_Acosh; } else if (strcmp(methodName, "Asin") == 0) { result = NI_System_Math_Asin; } else if (strcmp(methodName, "Asinh") == 0) { result = NI_System_Math_Asinh; } else if (strcmp(methodName, "Atan") == 0) { result = NI_System_Math_Atan; } else if (strcmp(methodName, "Atanh") == 0) { result = NI_System_Math_Atanh; } else if (strcmp(methodName, "Atan2") == 0) { result = NI_System_Math_Atan2; } else if (strcmp(methodName, "Cbrt") == 0) { result = NI_System_Math_Cbrt; } else if (strcmp(methodName, "Ceiling") == 0) { result = NI_System_Math_Ceiling; } else if (strcmp(methodName, "Cos") == 0) { result = NI_System_Math_Cos; } else if (strcmp(methodName, "Cosh") == 0) { result = NI_System_Math_Cosh; } else if (strcmp(methodName, "Exp") == 0) { result = NI_System_Math_Exp; } else if (strcmp(methodName, "Floor") == 0) { result = NI_System_Math_Floor; } else if (strcmp(methodName, "FMod") == 0) { result = NI_System_Math_FMod; } else if (strcmp(methodName, "FusedMultiplyAdd") == 0) { result = NI_System_Math_FusedMultiplyAdd; } else if (strcmp(methodName, "ILogB") == 0) { result = NI_System_Math_ILogB; } else if (strcmp(methodName, "Log") == 0) { result = NI_System_Math_Log; } else if (strcmp(methodName, "Log2") == 0) { result = NI_System_Math_Log2; } else if (strcmp(methodName, "Log10") == 0) { result = 
NI_System_Math_Log10; } else if (strcmp(methodName, "Max") == 0) { result = NI_System_Math_Max; } else if (strcmp(methodName, "Min") == 0) { result = NI_System_Math_Min; } else if (strcmp(methodName, "Pow") == 0) { result = NI_System_Math_Pow; } else if (strcmp(methodName, "Round") == 0) { result = NI_System_Math_Round; } else if (strcmp(methodName, "Sin") == 0) { result = NI_System_Math_Sin; } else if (strcmp(methodName, "Sinh") == 0) { result = NI_System_Math_Sinh; } else if (strcmp(methodName, "Sqrt") == 0) { result = NI_System_Math_Sqrt; } else if (strcmp(methodName, "Tan") == 0) { result = NI_System_Math_Tan; } else if (strcmp(methodName, "Tanh") == 0) { result = NI_System_Math_Tanh; } else if (strcmp(methodName, "Truncate") == 0) { result = NI_System_Math_Truncate; } } else if (strcmp(className, "GC") == 0) { if (strcmp(methodName, "KeepAlive") == 0) { result = NI_System_GC_KeepAlive; } } else if (strcmp(className, "Array") == 0) { if (strcmp(methodName, "Clone") == 0) { result = NI_System_Array_Clone; } else if (strcmp(methodName, "GetLength") == 0) { result = NI_System_Array_GetLength; } else if (strcmp(methodName, "GetLowerBound") == 0) { result = NI_System_Array_GetLowerBound; } else if (strcmp(methodName, "GetUpperBound") == 0) { result = NI_System_Array_GetUpperBound; } } else if (strcmp(className, "Object") == 0) { if (strcmp(methodName, "MemberwiseClone") == 0) { result = NI_System_Object_MemberwiseClone; } else if (strcmp(methodName, "GetType") == 0) { result = NI_System_Object_GetType; } else if (strcmp(methodName, "MethodTableOf") == 0) { result = NI_System_Object_MethodTableOf; } } else if (strcmp(className, "RuntimeTypeHandle") == 0) { if (strcmp(methodName, "GetValueInternal") == 0) { result = NI_System_RuntimeTypeHandle_GetValueInternal; } } else if (strcmp(className, "Type") == 0) { if (strcmp(methodName, "get_IsValueType") == 0) { result = NI_System_Type_get_IsValueType; } else if (strcmp(methodName, "IsAssignableFrom") == 0) { result = NI_System_Type_IsAssignableFrom; } else if (strcmp(methodName, "IsAssignableTo") == 0) { result = NI_System_Type_IsAssignableTo; } else if (strcmp(methodName, "op_Equality") == 0) { result = NI_System_Type_op_Equality; } else if (strcmp(methodName, "op_Inequality") == 0) { result = NI_System_Type_op_Inequality; } else if (strcmp(methodName, "GetTypeFromHandle") == 0) { result = NI_System_Type_GetTypeFromHandle; } } else if (strcmp(className, "String") == 0) { if (strcmp(methodName, "Equals") == 0) { result = NI_System_String_Equals; } else if (strcmp(methodName, "get_Chars") == 0) { result = NI_System_String_get_Chars; } else if (strcmp(methodName, "get_Length") == 0) { result = NI_System_String_get_Length; } else if (strcmp(methodName, "op_Implicit") == 0) { result = NI_System_String_op_Implicit; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_String_StartsWith; } } else if (strcmp(className, "MemoryExtensions") == 0) { if (strcmp(methodName, "AsSpan") == 0) { result = NI_System_MemoryExtensions_AsSpan; } if (strcmp(methodName, "SequenceEqual") == 0) { result = NI_System_MemoryExtensions_SequenceEqual; } else if (strcmp(methodName, "Equals") == 0) { result = NI_System_MemoryExtensions_Equals; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_MemoryExtensions_StartsWith; } } else if (strcmp(className, "Span`1") == 0) { if (strcmp(methodName, "get_Item") == 0) { result = NI_System_Span_get_Item; } } else if (strcmp(className, "ReadOnlySpan`1") == 0) { if (strcmp(methodName, "get_Item") == 0) 
{ result = NI_System_ReadOnlySpan_get_Item; } } else if (strcmp(className, "EETypePtr") == 0) { if (strcmp(methodName, "EETypePtrOf") == 0) { result = NI_System_EETypePtr_EETypePtrOf; } } } else if (strcmp(namespaceName, "System.Threading") == 0) { if (strcmp(className, "Thread") == 0) { if (strcmp(methodName, "get_CurrentThread") == 0) { result = NI_System_Threading_Thread_get_CurrentThread; } else if (strcmp(methodName, "get_ManagedThreadId") == 0) { result = NI_System_Threading_Thread_get_ManagedThreadId; } } else if (strcmp(className, "Interlocked") == 0) { #ifndef TARGET_ARM64 // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). if (strcmp(methodName, "And") == 0) { result = NI_System_Threading_Interlocked_And; } else if (strcmp(methodName, "Or") == 0) { result = NI_System_Threading_Interlocked_Or; } #endif if (strcmp(methodName, "CompareExchange") == 0) { result = NI_System_Threading_Interlocked_CompareExchange; } else if (strcmp(methodName, "Exchange") == 0) { result = NI_System_Threading_Interlocked_Exchange; } else if (strcmp(methodName, "ExchangeAdd") == 0) { result = NI_System_Threading_Interlocked_ExchangeAdd; } else if (strcmp(methodName, "MemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_MemoryBarrier; } else if (strcmp(methodName, "ReadMemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_ReadMemoryBarrier; } } } #if defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Buffers.Binary") == 0) { if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0)) { result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness; } } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Collections.Generic") == 0) { if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_EqualityComparer_get_Default; } else if ((strcmp(className, "Comparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_Comparer_get_Default; } } else if ((strcmp(namespaceName, "System.Numerics") == 0) && (strcmp(className, "BitOperations") == 0)) { if (strcmp(methodName, "PopCount") == 0) { result = NI_System_Numerics_BitOperations_PopCount; } } #ifdef FEATURE_HW_INTRINSICS else if (strcmp(namespaceName, "System.Numerics") == 0) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); int sizeOfVectorT = getSIMDVectorRegisterByteLength(); result = SimdAsHWIntrinsicInfo::lookupId(&sig, className, methodName, enclosingClassName, sizeOfVectorT); } #endif // FEATURE_HW_INTRINSICS else if ((strcmp(namespaceName, "System.Runtime.CompilerServices") == 0) && (strcmp(className, "RuntimeHelpers") == 0)) { if (strcmp(methodName, "CreateSpan") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan; } else if (strcmp(methodName, "InitializeArray") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray; } else if (strcmp(methodName, "IsKnownConstant") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant; } } else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0) { // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled // so we can specially handle IsSupported and recursive calls. // This is required to appropriately handle the intrinsics on platforms // which don't support them. 
On such a platform methods like Vector64.Create // will be seen as `Intrinsic` and `mustExpand` due to having a code path // which is recursive. When such a path is hit we expect it to be handled by // the importer and we fire an assert if it wasn't and in previous versions // of the JIT would fail fast. This was changed to throw a PNSE instead but // we still assert as most intrinsics should have been recognized/handled. // In order to avoid the assert, we specially handle the IsSupported checks // (to better allow dead-code optimizations) and we explicitly throw a PNSE // as we know that is the desired behavior for the HWIntrinsics when not // supported. For cases like Vector64.Create, this is fine because it will // be behind a relevant IsSupported check and will never be hit and the // software fallback will be executed instead. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_HW_INTRINSICS namespaceName += 25; const char* platformNamespaceName; #if defined(TARGET_XARCH) platformNamespaceName = ".X86"; #elif defined(TARGET_ARM64) platformNamespaceName = ".Arm"; #else #error Unsupported platform #endif if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); } #endif // FEATURE_HW_INTRINSICS if (result == NI_Illegal) { if ((strcmp(methodName, "get_IsSupported") == 0) || (strcmp(methodName, "get_IsHardwareAccelerated") == 0)) { // This allows the relevant code paths to be dropped as dead code even // on platforms where FEATURE_HW_INTRINSICS is not supported. result = NI_IsSupported_False; } else if (gtIsRecursiveCall(method)) { // For the framework itself, any recursive intrinsics will either be // only supported on a single platform or will be guarded by a relevant // IsSupported check so the throw PNSE will be valid or dropped. result = NI_Throw_PlatformNotSupportedException; } } } else if (strcmp(namespaceName, "System.StubHelpers") == 0) { if (strcmp(className, "StubHelpers") == 0) { if (strcmp(methodName, "GetStubContext") == 0) { result = NI_System_StubHelpers_GetStubContext; } else if (strcmp(methodName, "NextCallReturnAddress") == 0) { result = NI_System_StubHelpers_NextCallReturnAddress; } } } if (result == NI_Illegal) { JITDUMP("Not recognized\n"); } else if (result == NI_IsSupported_False) { JITDUMP("Unsupported - return false"); } else if (result == NI_Throw_PlatformNotSupportedException) { JITDUMP("Unsupported - throw PlatformNotSupportedException"); } else { JITDUMP("Recognized\n"); } return result; } //------------------------------------------------------------------------ // impUnsupportedNamedIntrinsic: Throws an exception for an unsupported named intrinsic // // Arguments: // helper - JIT helper ID for the exception to be thrown // method - method handle of the intrinsic function. // sig - signature of the intrinsic call // mustExpand - true if the intrinsic must return a GenTree*; otherwise, false // // Return Value: // a gtNewMustThrowException if mustExpand is true; otherwise, nullptr // GenTree* Compiler::impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand) { // We've hit some error case and may need to return a node for the given error. // // When `mustExpand=false`, we are attempting to inline the intrinsic directly into another method. 
In this // scenario, we need to return `nullptr` so that a GT_CALL to the intrinsic is emitted instead. This is to // ensure that everything continues to behave correctly when optimizations are enabled (e.g. things like the // inliner may expect the node we return to have a certain signature, and the `MustThrowException` node won't // match that). // // When `mustExpand=true`, we are in a GT_CALL to the intrinsic and are attempting to JIT it. This will generally // be in response to an indirect call (e.g. done via reflection) or in response to an earlier attempt returning // `nullptr` (under `mustExpand=false`). In that scenario, we are safe to return the `MustThrowException` node. if (mustExpand) { for (unsigned i = 0; i < sig->numArgs; i++) { impPopStack(); } return gtNewMustThrowException(helper, JITtype2varType(sig->retType), sig->retTypeClass); } else { return nullptr; } } /*****************************************************************************/ GenTree* Compiler::impArrayAccessIntrinsic( CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName) { /* If we are generating SMALL_CODE, we don't want to use intrinsics for the following, as it generates fatter code. */ if (compCodeOpt() == SMALL_CODE) { return nullptr; } /* These intrinsics generate fatter (but faster) code and are only done if we don't need SMALL_CODE */ unsigned rank = (intrinsicName == NI_Array_Set) ? (sig->numArgs - 1) : sig->numArgs; // The rank 1 case is special because it has to handle two array formats // we will simply not do that case if (rank > GT_ARR_MAX_RANK || rank <= 1) { return nullptr; } CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr; var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd)); // For the ref case, we will only be able to inline if the types match // (verifier checks for this, we don't care for the nonverified case and the // type is final (so we don't need to do the cast) if ((intrinsicName != NI_Array_Get) && !readonlyCall && varTypeIsGC(elemType)) { // Get the call site signature CORINFO_SIG_INFO LocalSig; eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig); assert(LocalSig.hasThis()); CORINFO_CLASS_HANDLE actualElemClsHnd; if (intrinsicName == NI_Array_Set) { // Fetch the last argument, the one that indicates the type we are setting. CORINFO_ARG_LIST_HANDLE argType = LocalSig.args; for (unsigned r = 0; r < rank; r++) { argType = info.compCompHnd->getArgNext(argType); } typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType); actualElemClsHnd = argInfo.GetClassHandle(); } else { assert(intrinsicName == NI_Array_Address); // Fetch the return type typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass); assert(retInfo.IsByRef()); actualElemClsHnd = retInfo.GetClassHandle(); } // if it's not final, we can't do the optimization if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL)) { return nullptr; } } unsigned arrayElemSize; if (elemType == TYP_STRUCT) { assert(arrElemClsHnd); arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd); } else { arrayElemSize = genTypeSize(elemType); } if ((unsigned char)arrayElemSize != arrayElemSize) { // arrayElemSize would be truncated as an unsigned char. // This means the array element is too large. Don't do the optimization. 
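// (GT_ARR_ELEM encodes the element size in an unsigned char, so elements larger than 255
// bytes cannot be represented and keep the ordinary call.)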
return nullptr; } GenTree* val = nullptr; if (intrinsicName == NI_Array_Set) { // Assignment of a struct is more work, and there are more gets than sets. if (elemType == TYP_STRUCT) { return nullptr; } val = impPopStack().val; assert(genActualType(elemType) == genActualType(val->gtType) || (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) || (elemType == TYP_INT && val->gtType == TYP_BYREF) || (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT)); } noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK); GenTree* inds[GT_ARR_MAX_RANK]; for (unsigned k = rank; k > 0; k--) { inds[k - 1] = impPopStack().val; } GenTree* arr = impPopStack().val; assert(arr->gtType == TYP_REF); GenTree* arrElem = new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank), static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]); if (intrinsicName != NI_Array_Address) { if (varTypeIsStruct(elemType)) { arrElem = gtNewObjNode(sig->retTypeClass, arrElem); } else { arrElem = gtNewOperNode(GT_IND, elemType, arrElem); } } if (intrinsicName == NI_Array_Set) { assert(val != nullptr); return gtNewAssignNode(arrElem, val); } else { return arrElem; } } //------------------------------------------------------------------------ // impKeepAliveIntrinsic: Import the GC.KeepAlive intrinsic call // // Imports the intrinsic as a GT_KEEPALIVE node, and, as an optimization, // if the object to keep alive is a GT_BOX, removes its side effects and // uses the address of a local (copied from the box's source if needed) // as the operand for GT_KEEPALIVE. For the BOX optimization, if the class // of the box has no GC fields, a GT_NOP is returned. // // Arguments: // objToKeepAlive - the intrinisic call's argument // // Return Value: // The imported GT_KEEPALIVE or GT_NOP - see description. 
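//    For example, GC.KeepAlive on a freshly boxed int becomes a NOP, since a boxed Int32
//    contains no GC fields that need to stay reachable.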
// GenTree* Compiler::impKeepAliveIntrinsic(GenTree* objToKeepAlive) { assert(objToKeepAlive->TypeIs(TYP_REF)); if (opts.OptimizationEnabled() && objToKeepAlive->IsBoxedValue()) { CORINFO_CLASS_HANDLE boxedClass = lvaGetDesc(objToKeepAlive->AsBox()->BoxOp()->AsLclVar())->lvClassHnd; ClassLayout* layout = typGetObjLayout(boxedClass); if (!layout->HasGCPtr()) { gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_AND_NARROW); JITDUMP("\nBOX class has no GC fields, KEEPALIVE is a NOP"); return gtNewNothingNode(); } GenTree* boxSrc = gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_BUT_NOT_NARROW); if (boxSrc != nullptr) { unsigned boxTempNum; if (boxSrc->OperIs(GT_LCL_VAR)) { boxTempNum = boxSrc->AsLclVarCommon()->GetLclNum(); } else { boxTempNum = lvaGrabTemp(true DEBUGARG("Temp for the box source")); GenTree* boxTempAsg = gtNewTempAssign(boxTempNum, boxSrc); Statement* boxAsgStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue; boxAsgStmt->SetRootNode(boxTempAsg); } JITDUMP("\nImporting KEEPALIVE(BOX) as KEEPALIVE(ADDR(LCL_VAR V%02u))", boxTempNum); GenTree* boxTemp = gtNewLclvNode(boxTempNum, boxSrc->TypeGet()); GenTree* boxTempAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, boxTemp); return gtNewKeepAliveNode(boxTempAddr); } } return gtNewKeepAliveNode(objToKeepAlive); } bool Compiler::verMergeEntryStates(BasicBlock* block, bool* changed) { unsigned i; // do some basic checks first if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth) { return false; } if (verCurrentState.esStackDepth > 0) { // merge stack types StackEntry* parentStack = block->bbStackOnEntry(); StackEntry* childStack = verCurrentState.esStack; for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++) { if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == false) { return false; } } } // merge initialization status of this ptr if (verTrackObjCtorInitState) { // If we're tracking the CtorInitState, then it must not be unknown in the current state. assert(verCurrentState.thisInitialized != TIS_Bottom); // If the successor block's thisInit state is unknown, copy it from the current state. if (block->bbThisOnEntry() == TIS_Bottom) { *changed = true; verSetThisInit(block, verCurrentState.thisInitialized); } else if (verCurrentState.thisInitialized != block->bbThisOnEntry()) { if (block->bbThisOnEntry() != TIS_Top) { *changed = true; verSetThisInit(block, TIS_Top); if (block->bbFlags & BBF_FAILED_VERIFICATION) { // The block is bad. Control can flow through the block to any handler that catches the // verification exception, but the importer ignores bad blocks and therefore won't model // this flow in the normal way. To complete the merge into the bad block, the new state // needs to be manually pushed to the handlers that may be reached after the verification // exception occurs. // // Usually, the new state was already propagated to the relevant handlers while processing // the predecessors of the bad block. The exception is when the bad block is at the start // of a try region, meaning it is protected by additional handlers that do not protect its // predecessors. // if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0)) { // Push TIS_Top to the handlers that protect the bad block. Note that this can cause // recursive calls back into this code path (if successors of the current bad block are // also bad blocks). 
// ThisInitState origTIS = verCurrentState.thisInitialized; verCurrentState.thisInitialized = TIS_Top; impVerifyEHBlock(block, true); verCurrentState.thisInitialized = origTIS; } } } } } else { assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom); } return true; } /***************************************************************************** * 'logMsg' is true if a log message needs to be logged. false if the caller has * already logged it (presumably in a more detailed fashion than done here) */ void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { block->bbJumpKind = BBJ_THROW; block->bbFlags |= BBF_FAILED_VERIFICATION; block->bbFlags &= ~BBF_IMPORTED; impCurStmtOffsSet(block->bbCodeOffs); // Clear the statement list as it exists so far; we're only going to have a verification exception. impStmtList = impLastStmt = nullptr; #ifdef DEBUG if (logMsg) { JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName, block->bbCodeOffs, block->bbCodeOffsEnd)); if (verbose) { printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs); } } if (JitConfig.DebugBreakOnVerificationFailure()) { DebugBreak(); } #endif impBeginTreeList(); // if the stack is non-empty evaluate all the side-effects if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); GenTree* op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewCallArgs(gtNewIconNode(block->bbCodeOffs))); // verCurrentState.esStackDepth = 0; impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // The inliner is not able to handle methods that require throw block, so // make sure this methods never gets inlined. 
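// CORINFO_FLG_BAD_INLINEE is recorded on the method by the EE, so later inline attempts
// are rejected up front instead of rediscovering the throw block here.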
info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE); } /***************************************************************************** * */ void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)) { verResetCurrentState(block, &verCurrentState); verConvertBBToThrowVerificationException(block DEBUGARG(logMsg)); #ifdef DEBUG impNoteLastILoffs(); // Remember at which BC offset the tree was finished #endif // DEBUG } /******************************************************************************/ typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd) { assert(ciType < CORINFO_TYPE_COUNT); typeInfo tiResult; switch (ciType) { case CORINFO_TYPE_STRING: case CORINFO_TYPE_CLASS: tiResult = verMakeTypeInfo(clsHnd); if (!tiResult.IsType(TI_REF)) { // type must be consistent with element type return typeInfo(); } break; #ifdef TARGET_64BIT case CORINFO_TYPE_NATIVEINT: case CORINFO_TYPE_NATIVEUINT: if (clsHnd) { // If we have more precise information, use it return verMakeTypeInfo(clsHnd); } else { return typeInfo::nativeInt(); } break; #endif // TARGET_64BIT case CORINFO_TYPE_VALUECLASS: case CORINFO_TYPE_REFANY: tiResult = verMakeTypeInfo(clsHnd); // type must be constant with element type; if (!tiResult.IsValueClass()) { return typeInfo(); } break; case CORINFO_TYPE_VAR: return verMakeTypeInfo(clsHnd); case CORINFO_TYPE_PTR: // for now, pointers are treated as an error case CORINFO_TYPE_VOID: return typeInfo(); break; case CORINFO_TYPE_BYREF: { CORINFO_CLASS_HANDLE childClassHandle; CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle); return ByRef(verMakeTypeInfo(childType, childClassHandle)); } break; default: if (clsHnd) { // If we have more precise information, use it return typeInfo(TI_STRUCT, clsHnd); } else { return typeInfo(JITtype2tiType(ciType)); } } return tiResult; } /******************************************************************************/ typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */) { if (clsHnd == nullptr) { return typeInfo(); } // Byrefs should only occur in method and local signatures, which are accessed // using ICorClassInfo and ICorClassInfo.getChildType. // So findClass() and getClassAttribs() should not be called for byrefs if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF) { assert(!"Did findClass() return a Byref?"); return typeInfo(); } unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd); if (attribs & CORINFO_FLG_VALUECLASS) { CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd); // Meta-data validation should ensure that CORINF_TYPE_BYREF should // not occur here, so we may want to change this to an assert instead. if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR) { return typeInfo(); } #ifdef TARGET_64BIT if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT) { return typeInfo::nativeInt(); } #endif // TARGET_64BIT if (t != CORINFO_TYPE_UNDEF) { return (typeInfo(JITtype2tiType(t))); } else if (bashStructToRef) { return (typeInfo(TI_REF, clsHnd)); } else { return (typeInfo(TI_STRUCT, clsHnd)); } } else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE) { // See comment in _typeInfo.h for why we do it this way. 
return (typeInfo(TI_REF, clsHnd, true)); } else { return (typeInfo(TI_REF, clsHnd)); } } /******************************************************************************/ bool Compiler::verIsSDArray(const typeInfo& ti) { if (ti.IsNullObjRef()) { // nulls are SD arrays return true; } if (!ti.IsType(TI_REF)) { return false; } if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef())) { return false; } return true; } /******************************************************************************/ /* Given 'arrayObjectType' which is an array type, fetch the element type. */ /* Returns an error type if anything goes wrong */ typeInfo Compiler::verGetArrayElemType(const typeInfo& arrayObjectType) { assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case if (!verIsSDArray(arrayObjectType)) { return typeInfo(); } CORINFO_CLASS_HANDLE childClassHandle = nullptr; CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle); return verMakeTypeInfo(ciType, childClassHandle); } /***************************************************************************** */ typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args) { CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle)); var_types type = JITtype2varType(ciType); if (varTypeIsGC(type)) { // For efficiency, getArgType only returns something in classHandle for // value types. For other types that have addition type info, you // have to call back explicitly classHandle = info.compCompHnd->getArgClass(sig, args); if (!classHandle) { NO_WAY("Could not figure out Class specified in argument or local signature"); } } return verMakeTypeInfo(ciType, classHandle); } bool Compiler::verIsByRefLike(const typeInfo& ti) { if (ti.IsByRef()) { return true; } if (!ti.IsType(TI_STRUCT)) { return false; } return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE; } bool Compiler::verIsSafeToReturnByRef(const typeInfo& ti) { if (ti.IsPermanentHomeByRef()) { return true; } else { return false; } } bool Compiler::verIsBoxable(const typeInfo& ti) { return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables || ti.IsUnboxedGenericTypeVar() || (ti.IsType(TI_STRUCT) && // exclude byreflike structs !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE))); } // Is it a boxed value type? bool Compiler::verIsBoxedValueType(const typeInfo& ti) { if (ti.GetType() == TI_REF) { CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef(); return !!eeIsValueClass(clsHnd); } else { return false; } } /***************************************************************************** * * Check if a TailCall is legal. */ bool Compiler::verCheckTailCallConstraint( OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter? bool speculative // If true, won't throw if verificatoin fails. Instead it will // return false to the caller. // If false, it will throw. 
) { DWORD mflags; CORINFO_SIG_INFO sig; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped CORINFO_METHOD_HANDLE methodHnd = nullptr; CORINFO_CLASS_HANDLE methodClassHnd = nullptr; unsigned methodClassFlgs = 0; assert(impOpcodeIsCallOpcode(opcode)); if (compIsForInlining()) { return false; } // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { /* Get the call sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; } else { methodHnd = pResolvedToken->hMethod; mflags = info.compCompHnd->getMethodAttribs(methodHnd); // When verifying generic code we pair the method handle with its // owning class to get the exact method signature. methodClassHnd = pResolvedToken->hClass; assert(methodClassHnd); eeGetMethodSig(methodHnd, &sig, methodClassHnd); // opcode specific check methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd); } // We must have got the methodClassHnd if opcode is not CEE_CALLI assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI); if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } // check compatibility of the arguments unsigned int argCount; argCount = sig.numArgs; CORINFO_ARG_LIST_HANDLE args; args = sig.args; while (argCount--) { typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack(); // check that the argument is not a byref for tailcalls VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative); // For unsafe code, we might have parameters containing pointer to the stack location. // Disallow the tailcall for this kind. CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle)); VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative); args = info.compCompHnd->getArgNext(args); } // update popCount popCount += sig.numArgs; // check for 'this' which is on non-static methods, not called via NEWOBJ if (!(mflags & CORINFO_FLG_STATIC)) { // Always update the popCount. // This is crucial for the stack calculation to be correct. typeInfo tiThis = impStackTop(popCount).seTypeInfo; popCount++; if (opcode == CEE_CALLI) { // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object // on the stack. 
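            // (A value-class 'this' is passed by reference, so normalizing it to a byref here lets
            // the byref-like check below reject it, mirroring the declared-'this' path used for
            // the non-CALLI case.)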
if (tiThis.IsValueClass()) { tiThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative); } else { // Check type compatibility of the this argument typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative); } } // Tail calls on constrained calls should be illegal too: // when instantiated at a value type, a constrained call may pass the address of a stack allocated value VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative); // Get the exact view of the signature for an array method if (sig.retType != CORINFO_TYPE_VOID) { if (methodClassFlgs & CORINFO_FLG_ARRAY) { assert(opcode != CEE_CALLI); eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } } typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass); typeInfo tiCallerRetType = verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass); // void return type gets morphed into the error type, so we have to treat them specially here if (sig.retType == CORINFO_TYPE_VOID) { VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch", speculative); } else { VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType), NormaliseForStack(tiCallerRetType), true), "tailcall return mismatch", speculative); } // for tailcall, stack must be empty VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative); return true; // Yes, tailcall is legal } /***************************************************************************** * * Checks the IL verification rules for the call */ void Compiler::verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)) { DWORD mflags; CORINFO_SIG_INFO* sig = nullptr; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { Verify(false, "Calli not verifiable"); return; } //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item. 
    mflags = callInfo->verMethodFlags;
    sig    = &callInfo->verSig;

    if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
    {
        eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
    }

    // opcode specific check
    unsigned methodClassFlgs = callInfo->classFlags;
    switch (opcode)
    {
        case CEE_CALLVIRT:
            // cannot do callvirt on valuetypes
            VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
            VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
            break;

        case CEE_NEWOBJ:
        {
            assert(!tailCall); // Importer should not allow this
            VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
                           "newobj must be on instance");

            if (methodClassFlgs & CORINFO_FLG_DELEGATE)
            {
                VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
                typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
                typeInfo tiDeclaredFtn =
                    verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
                VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");

                assert(popCount == 0);
                typeInfo tiActualObj = impStackTop(1).seTypeInfo;
                typeInfo tiActualFtn = impStackTop(0).seTypeInfo;

                VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
                VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
                VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
                               "delegate object type mismatch");

                CORINFO_CLASS_HANDLE objTypeHandle =
                    tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();

                // the method signature must be compatible with the delegate's invoke method

                // check that for virtual functions, the type of the object used to get the
                // ftn ptr is the same as the type of the object passed to the delegate ctor.
                // since this is a bit of work to determine in general, we pattern match stylized
                // code sequences

                // the delegate creation code check, which used to be done later, is now done here
                // so we can read delegateMethodRef directly from
                // the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
                // we then use it in our call to isCompatibleDelegate().
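                // For illustration only (tokens abbreviated), the two stylized sequences matched by
                // verCheckDelegateCreation() below look roughly like:
                //
                //     ldftn      instance void C::M()   // target known at the creation site
                //     newobj     instance void D::.ctor(object, native int)
                //
                //     dup
                //     ldvirtftn  instance void C::M()   // virtual dispatch over the duplicated 'this'
                //     newobj     instance void D::.ctor(object, native int)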
mdMemberRef delegateMethodRef = mdMemberRefNil; VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef), "must create delegates with certain IL"); CORINFO_RESOLVED_TOKEN delegateResolvedToken; delegateResolvedToken.tokenContext = impTokenLookupContextHandle; delegateResolvedToken.tokenScope = info.compScopeHnd; delegateResolvedToken.token = delegateMethodRef; delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method; info.compCompHnd->resolveToken(&delegateResolvedToken); CORINFO_CALL_INFO delegateCallInfo; eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */, CORINFO_CALLINFO_SECURITYCHECKS, &delegateCallInfo); bool isOpenDelegate = false; VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass, tiActualFtn.GetMethod(), pResolvedToken->hClass, &isOpenDelegate), "function incompatible with delegate"); // check the constraints on the target method VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass), "delegate target has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass, tiActualFtn.GetMethod()), "delegate target has unsatisfied method constraints"); // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch) // for additional verification rules for delegates CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod(); DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle); if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiActualObj), "The 'this' parameter to the call must be either the calling method's " "'this' parameter or " "a boxed value type."); } } if (actualMethodAttribs & CORINFO_FLG_PROTECTED) { bool targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC; Verify(targetIsStatic || !isOpenDelegate, "Unverifiable creation of an open instance delegate for a protected member."); CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic) ? info.compClassHnd : tiActualObj.GetClassHandleForObjRef(); // In the case of protected methods, it is a requirement that the 'this' // pointer be a subclass of the current context. Perform this check. 
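                    // For example (illustrative C# only): if Base declares 'protected void M()',
                    // code in 'Derived : Base' may only bind a delegate to M over a Derived (or
                    // further derived) instance, never over an arbitrary Base instance.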
                    Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
                           "Accessing protected method through wrong type.");
                }

                goto DONE_ARGS;
            }
        }
            // fall thru to default checks
            FALLTHROUGH;

        default:
            VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
    }

    VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
                   "can only newobj a delegate constructor");

    // check compatibility of the arguments
    unsigned int argCount;
    argCount = sig->numArgs;
    CORINFO_ARG_LIST_HANDLE args;
    args = sig->args;
    while (argCount--)
    {
        typeInfo tiActual   = impStackTop(popCount + argCount).seTypeInfo;
        typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();

        VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");

        args = info.compCompHnd->getArgNext(args);
    }

DONE_ARGS:

    // update popCount
    popCount += sig->numArgs;

    // check for 'this' which is on non-static methods, not called via NEWOBJ
    CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
    if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
    {
        typeInfo tiThis = impStackTop(popCount).seTypeInfo;
        popCount++;

        // If it is null, we assume we can access it (since it will AV shortly)
        // If it is anything but a reference class, there is no hierarchy, so
        // again, we don't need the precise instance class to compute 'protected' access
        if (tiThis.IsType(TI_REF))
        {
            instanceClassHnd = tiThis.GetClassHandleForObjRef();
        }

        // Check type compatibility of the this argument
        typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
        if (tiDeclaredThis.IsValueClass())
        {
            tiDeclaredThis.MakeByRef();
        }

        // If this is a call to the base class .ctor, set thisPtr Init for
        // this block.
        if (mflags & CORINFO_FLG_CONSTRUCTOR)
        {
            if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
                verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
            {
                assert(verCurrentState.thisInitialized != TIS_Bottom); // This should never be the case just from the
                                                                       // logic of the verifier.

                VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
                               "Call to base class constructor when 'this' is possibly initialized");
                // Otherwise, 'this' is now initialized.
                verCurrentState.thisInitialized = TIS_Init;
                tiThis.SetInitialisedObjRef();
            }
            else
            {
                // We allow direct calls to value type constructors
                // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
                // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
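                // For example (illustrative IL), the permitted shape is the one used to construct
                // a struct local in place:
                //
                //     ldloca.s   V_0                              // byref to a value type
                //     call       instance void S::.ctor(int32)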
VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(), "Bad call to a constructor"); } } if (pConstrainedResolvedToken != nullptr) { VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call"); typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass); // We just dereference this and test for equality tiThis.DereferenceByRef(); VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint), "this type mismatch with constrained type operand"); // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass); } // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef()) { tiDeclaredThis.SetIsReadonlyByRef(); } VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch"); if (tiThis.IsByRef()) { // Find the actual type where the method exists (as opposed to what is declared // in the metadata). This is to prevent passing a byref as the "this" argument // while calling methods like System.ValueType.GetHashCode() which expect boxed objects. CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod); VerifyOrReturn(eeIsValueClass(actualClassHnd), "Call to base type of valuetype (which is never a valuetype)"); } // Rules for non-virtual call to a non-final virtual method: // Define: // The "this" pointer is considered to be "possibly written" if // 1. Its address have been taken (LDARGA 0) anywhere in the method. // (or) // 2. It has been stored to (STARG.0) anywhere in the method. // A non-virtual call to a non-final virtual method is only allowed if // 1. The this pointer passed to the callee is an instance of a boxed value type. // (or) // 2. The this pointer passed to the callee is the current method's this pointer. // (and) The current method's this pointer is not "possibly written". // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to // virtual methods. (Luckily this does affect .ctors, since they are not virtual). // This is stronger that is strictly needed, but implementing a laxer rule is significantly // hard and more error prone. if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiThis), "The 'this' parameter to the call must be either the calling method's 'this' parameter or " "a boxed value type."); } } // check any constraints on the callee's class and type parameters VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass), "method has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod), "method has unsatisfied method constraints"); if (mflags & CORINFO_FLG_PROTECTED) { VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd), "Can't access protected method"); } // Get the exact view of the signature for an array method if (sig->retType != CORINFO_TYPE_VOID) { eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass); } // "readonly." prefixed calls only allowed for the Address operation on arrays. // The methods supported by array types are under the control of the EE // so we can trust that only the Address operation returns a byref. 
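    // For example (illustrative IL, token spelling approximate), the only verifiable use is on the
    // multi-dimensional array Address accessor:
    //
    //     readonly.
    //     call       instance int32& int32[0...,0...]::Address(int32, int32)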
if (readonlyCall) { typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass); VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(), "unexpected use of readonly prefix"); } // Verify the tailcall if (tailCall) { verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false); } } /***************************************************************************** * Checks that a delegate creation is done using the following pattern: * dup * ldvirtftn targetMemberRef * OR * ldftn targetMemberRef * * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if * not in this basic block) * * targetMemberRef is read from the code sequence. * targetMemberRef is validated iff verificationNeeded. */ bool Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef) { if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]); return true; } else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]); return true; } return false; } typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType) { Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref"); typeInfo ptrVal = verVerifyLDIND(tiTo, instrType); typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack(); if (!tiCompatibleWith(value, normPtrVal, true)) { Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch"); } return ptrVal; } typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType) { assert(!instrType.IsStruct()); typeInfo ptrVal; if (ptr.IsByRef()) { ptrVal = DereferenceByRef(ptr); if (instrType.IsObjRef() && !ptrVal.IsObjRef()) { Verify(false, "bad pointer"); } else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal)) { Verify(false, "pointer not consistent with instr"); } } else { Verify(false, "pointer not byref"); } return ptrVal; } // Verify that the field is used properly. 'tiThis' is NULL for statics, // 'fieldFlags' is the fields attributes, and mutator is true if it is a // ld*flda or a st*fld. // 'enclosingClass' is given if we are accessing a field in some specific type. void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis) { CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass; unsigned fieldFlags = fieldInfo.fieldFlags; CORINFO_CLASS_HANDLE instanceClass = info.compClassHnd; // for statics, we imagine the instance is the current class. 
bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0); if (mutator) { Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA bases static"); if ((fieldFlags & CORINFO_FLG_FIELD_FINAL)) { Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd && info.compIsStatic == isStaticField, "bad use of initonly field (set or address taken)"); } } if (tiThis == nullptr) { Verify(isStaticField, "used static opcode with non-static field"); } else { typeInfo tThis = *tiThis; if (allowPlainStructAsThis && tThis.IsValueClass()) { tThis.MakeByRef(); } // If it is null, we assume we can access it (since it will AV shortly) // If it is anything but a refernce class, there is no hierarchy, so // again, we don't need the precise instance class to compute 'protected' access if (tiThis->IsType(TI_REF)) { instanceClass = tiThis->GetClassHandleForObjRef(); } // Note that even if the field is static, we require that the this pointer // satisfy the same constraints as a non-static field This happens to // be simpler and seems reasonable typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); // we allow read-only tThis, on any field access (even stores!), because if the // class implementor wants to prohibit stores he should make the field private. // we do this by setting the read-only bit on the type we compare tThis to. tiDeclaredThis.SetIsReadonlyByRef(); } else if (verTrackObjCtorInitState && tThis.IsThisPtr()) { // Any field access is legal on "uninitialized" this pointers. // The easiest way to implement this is to simply set the // initialized bit for the duration of the type check on the // field access only. It does not change the state of the "this" // for the function as a whole. Note that the "tThis" is a copy // of the original "this" type (*tiThis) passed in. tThis.SetInitialisedObjRef(); } Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch"); } // Presently the JIT does not check that we don't store or take the address of init-only fields // since we cannot guarantee their immutability and it is not a security issue. // check any constraints on the fields's class --- accessing the field might cause a class constructor to run. VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass), "field has unsatisfied class constraints"); if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED) { Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass), "Accessing protected method through wrong type."); } } void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode) { if (tiOp1.IsNumberType()) { #ifdef TARGET_64BIT Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch"); #else // TARGET_64BIT // [10/17/2013] Consider changing this: to put on my verification lawyer hat, // this is non-conforming to the ECMA Spec: types don't have to be equivalent, // but compatible, since we can coalesce native int with int32 (see section III.1.5). 
Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch"); #endif // !TARGET_64BIT } else if (tiOp1.IsObjRef()) { switch (opcode) { case CEE_BEQ_S: case CEE_BEQ: case CEE_BNE_UN_S: case CEE_BNE_UN: case CEE_CEQ: case CEE_CGT_UN: break; default: Verify(false, "Cond not allowed on object types"); } Verify(tiOp2.IsObjRef(), "Cond type mismatch"); } else if (tiOp1.IsByRef()) { Verify(tiOp2.IsByRef(), "Cond type mismatch"); } else { Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch"); } } void Compiler::verVerifyThisPtrInitialised() { if (verTrackObjCtorInitState) { Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized"); } } bool Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target) { // Either target == context, in this case calling an alternate .ctor // Or target is the immediate parent of context return ((target == context) || (target == info.compCompHnd->getParentType(context))); } GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } // CoreRT generic virtual method if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* runtimeMethodHandle = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_METHOD_HDL, pCallInfo->hMethod); return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL, gtNewCallArgs(thisPtr, runtimeMethodHandle)); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { if (!pCallInfo->exactContextNeedsRuntimeLookup) { GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewCallArgs(thisPtr)); call->setEntryPoint(pCallInfo->codePointerLookup.constLookup); return call; } // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too. if (IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind); return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pCallInfo->codePointerLookup.lookupKind); } } #endif // Get the exact descriptor for the static callsite GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken); if (exactTypeDesc == nullptr) { // compDonotInline() return nullptr; } GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken); if (exactMethodDesc == nullptr) { // compDonotInline() return nullptr; } GenTreeCall::Use* helpArgs = gtNewCallArgs(exactMethodDesc); helpArgs = gtPrependNewCallArg(exactTypeDesc, helpArgs); helpArgs = gtPrependNewCallArg(thisPtr, helpArgs); // Call helper function. This gets the target address of the final destination callsite. return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // impBoxPatternMatch: match and import common box idioms // // Arguments: // pResolvedToken - resolved token from the box operation // codeAddr - position in IL stream after the box instruction // codeEndp - end of IL stream // // Return Value: // Number of IL bytes matched and imported, -1 otherwise // // Notes: // pResolvedToken is known to be a value type; ref type boxing // is handled in the CEE_BOX clause. 
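//
//   For illustration (tokens abbreviated), the IL shapes recognized below are:
//
//     box T ; unbox.any T                         -- a nop when the two tokens match
//     box T ; brtrue/brfalse <target>             -- a constant (plus a null check if needed)
//     box T ; isinst C ; brtrue/brfalse <target>  -- a constant when the cast result is statically known
//     box T ; isinst C ; unbox.any C              -- a nop when all three tokens agree
//
//   Whether each shape actually folds depends on the helper and token checks in the code below.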
int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation) { if (codeAddr >= codeEndp) { return -1; } switch (codeAddr[0]) { case CEE_UNBOX_ANY: // box + unbox.any if (codeAddr + 1 + sizeof(mdToken) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } CORINFO_RESOLVED_TOKEN unboxResolvedToken; impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // See if the resolved tokens describe types that are equal. const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass); // If so, box/unbox.any is a nop. if (compare == TypeCompareState::Must) { JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n"); // Skip the next unbox.any instruction return 1 + sizeof(mdToken); } } break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: // box + br_true/false if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 0; } GenTree* const treeToBox = impStackTop().val; bool canOptimize = true; GenTree* treeToNullcheck = nullptr; // Can the thing being boxed cause a side effect? if ((treeToBox->gtFlags & GTF_SIDE_EFFECT) != 0) { // Is this a side effect we can replicate cheaply? if (((treeToBox->gtFlags & GTF_SIDE_EFFECT) == GTF_EXCEPT) && treeToBox->OperIs(GT_OBJ, GT_BLK, GT_IND)) { // Yes, we just need to perform a null check if needed. GenTree* const addr = treeToBox->AsOp()->gtGetOp1(); if (fgAddrCouldBeNull(addr)) { treeToNullcheck = addr; } } else { canOptimize = false; } } if (canOptimize) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { JITDUMP("\n Importing BOX; BR_TRUE/FALSE as %sconstant\n", treeToNullcheck == nullptr ? "" : "nullcheck+"); impPopStack(); GenTree* result = gtNewIconNode(1); if (treeToNullcheck != nullptr) { GenTree* nullcheck = gtNewNullCheck(treeToNullcheck, compCurBB); result = gtNewOperNode(GT_COMMA, TYP_INT, nullcheck, result); } impPushOnStack(result, typeInfo(TI_INT)); return 0; } } } break; case CEE_ISINST: if (codeAddr + 1 + sizeof(mdToken) + 1 <= codeEndp) { const BYTE* nextCodeAddr = codeAddr + 1 + sizeof(mdToken); switch (nextCodeAddr[0]) { // box + isinst + br_true/false case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT)) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(pResolvedToken->hClass, isInstResolvedToken.hClass); if (castResult != TypeCompareState::May) { JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant\n"); impPopStack(); impPushOnStack(gtNewIconNode((castResult == TypeCompareState::Must) ? 
1 : 0), typeInfo(TI_INT)); // Skip the next isinst instruction return 1 + sizeof(mdToken); } } else if (boxHelper == CORINFO_HELP_BOX_NULLABLE) { // For nullable we're going to fold it to "ldfld hasValue + brtrue/brfalse" or // "ldc.i4.0 + brtrue/brfalse" in case if the underlying type is not castable to // the target type. CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); CORINFO_CLASS_HANDLE nullableCls = pResolvedToken->hClass; CORINFO_CLASS_HANDLE underlyingCls = info.compCompHnd->getTypeForBox(nullableCls); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(underlyingCls, isInstResolvedToken.hClass); if (castResult == TypeCompareState::Must) { const CORINFO_FIELD_HANDLE hasValueFldHnd = info.compCompHnd->getFieldInClass(nullableCls, 0); assert(info.compCompHnd->getFieldOffset(hasValueFldHnd) == 0); assert(!strcmp(info.compCompHnd->getFieldName(hasValueFldHnd, nullptr), "hasValue")); GenTree* objToBox = impPopStack().val; // Spill struct to get its address (to access hasValue field) objToBox = impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true); impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0), typeInfo(TI_INT)); JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as nullableVT.hasValue\n"); return 1 + sizeof(mdToken); } else if (castResult == TypeCompareState::MustNot) { impPopStack(); impPushOnStack(gtNewIconNode(0), typeInfo(TI_INT)); JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant (false)\n"); return 1 + sizeof(mdToken); } } } } break; // box + isinst + unbox.any case CEE_UNBOX_ANY: if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 2 + sizeof(mdToken) * 2; } // See if the resolved tokens in box, isinst and unbox.any describe types that are equal. CORINFO_RESOLVED_TOKEN isinstResolvedToken = {}; impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class); if (info.compCompHnd->compareTypesForEquality(isinstResolvedToken.hClass, pResolvedToken->hClass) == TypeCompareState::Must) { CORINFO_RESOLVED_TOKEN unboxResolvedToken = {}; impResolveToken(nextCodeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // If so, box + isinst + unbox.any is a nop. if (info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass) == TypeCompareState::Must) { JITDUMP("\n Importing BOX; ISINST, UNBOX.ANY as NOP\n"); return 2 + sizeof(mdToken) * 2; } } } break; } } break; default: break; } return -1; } //------------------------------------------------------------------------ // impImportAndPushBox: build and import a value-type box // // Arguments: // pResolvedToken - resolved token from the box operation // // Return Value: // None. // // Side Effects: // The value to be boxed is popped from the stack, and a tree for // the boxed value is pushed. This method may create upstream // statements, spill side effecting trees, and create new temps. // // If importing an inlinee, we may also discover the inline must // fail. If so there is no new value pushed on the stack. Callers // should use CompDoNotInline after calling this method to see if // ongoing importation should be aborted. // // Notes: // Boxing of ref classes results in the same value as the value on // the top of the stack, so is handled inline in impImportBlockCode // for the CEE_BOX case. Only value or primitive type boxes make it // here. 
// // Boxing for nullable types is done via a helper call; boxing // of other value types is expanded inline or handled via helper // call, depending on the jit's codegen mode. // // When the jit is operating in size and time constrained modes, // using a helper call here can save jit time and code size. But it // also may inhibit cleanup optimizations that could have also had a // even greater benefit effect on code size and jit time. An optimal // strategy may need to peek ahead and see if it is easy to tell how // the box is being used. For now, we defer. void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) { // Spill any special side effects impSpillSpecialSideEff(); // Get get the expression to box from the stack. GenTree* op1 = nullptr; GenTree* op2 = nullptr; StackEntry se = impPopStack(); CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle(); GenTree* exprToBox = se.val; // Look at what helper we should use. CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); // Determine what expansion to prefer. // // In size/time/debuggable constrained modes, the helper call // expansion for box is generally smaller and is preferred, unless // the value to box is a struct that comes from a call. In that // case the call can construct its return value directly into the // box payload, saving possibly some up-front zeroing. // // Currently primitive type boxes always get inline expanded. We may // want to do the same for small structs if they don't come from // calls and don't have GC pointers, since explicitly copying such // structs is cheap. JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via"); bool canExpandInline = (boxHelper == CORINFO_HELP_BOX); bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled(); bool expandInline = canExpandInline && !optForSize; if (expandInline) { JITDUMP(" inline allocate/copy sequence\n"); // we are doing 'normal' boxing. This means that we can inline the box operation // Box(expr) gets morphed into // temp = new(clsHnd) // cpobj(temp+4, expr, clsHnd) // push temp // The code paths differ slightly below for structs and primitives because // "cpobj" differs in these cases. In one case you get // impAssignStructPtr(temp+4, expr, clsHnd) // and the other you get // *(temp+4) = expr if (opts.OptimizationDisabled()) { // For minopts/debug code, try and minimize the total number // of box temps by reusing an existing temp when possible. if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM) { impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper")); } } else { // When optimizing, use a new temp for each box operation // since we then know the exact class of the box temp. impBoxTemp = lvaGrabTemp(true DEBUGARG("Single-def Box Helper")); lvaTable[impBoxTemp].lvType = TYP_REF; lvaTable[impBoxTemp].lvSingleDef = 1; JITDUMP("Marking V%02u as a single def local\n", impBoxTemp); const bool isExact = true; lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact); } // needs to stay in use until this box expression is appended // some other node. We approximate this by keeping it alive until // the opcode stack becomes empty impBoxTempInUse = true; // Remember the current last statement in case we need to move // a range of statements to ensure the box temp is initialized // before it's used. 
// Statement* const cursor = impLastStmt; const bool useParent = false; op1 = gtNewAllocObjNode(pResolvedToken, useParent); if (op1 == nullptr) { // If we fail to create the newobj node, we must be inlining // and have run across a type we can't describe. // assert(compDonotInline()); return; } // Remember that this basic block contains 'new' of an object, // and so does this method // compCurBB->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Assign the boxed object to the box temp. // GenTree* asg = gtNewTempAssign(impBoxTemp, op1); Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // If the exprToBox is a call that returns its value via a ret buf arg, // move the assignment statement(s) before the call (which must be a top level tree). // // We do this because impAssignStructPtr (invoked below) will // back-substitute into a call when it sees a GT_RET_EXPR and the call // has a hidden buffer pointer, So we need to reorder things to avoid // creating out-of-sequence IR. // if (varTypeIsStruct(exprToBox) && exprToBox->OperIs(GT_RET_EXPR)) { GenTreeCall* const call = exprToBox->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->HasRetBufArg()) { JITDUMP("Must insert newobj stmts for box before call [%06u]\n", dspTreeID(call)); // Walk back through the statements in this block, looking for the one // that has this call as the root node. // // Because gtNewTempAssign (above) may have added statements that // feed into the actual assignment we need to move this set of added // statements as a group. // // Note boxed allocations are side-effect free (no com or finalizer) so // our only worries here are (correctness) not overlapping the box temp // lifetime and (perf) stretching the temp lifetime across the inlinee // body. // // Since this is an inline candidate, we must be optimizing, and so we have // a unique box temp per call. So no worries about overlap. // assert(!opts.OptimizationDisabled()); // Lifetime stretching could addressed with some extra cleverness--sinking // the allocation back down to just before the copy, once we figure out // where the copy is. We defer for now. // Statement* insertBeforeStmt = cursor; noway_assert(insertBeforeStmt != nullptr); while (true) { if (insertBeforeStmt->GetRootNode() == call) { break; } // If we've searched all the statements in the block and failed to // find the call, then something's wrong. // noway_assert(insertBeforeStmt != impStmtList); insertBeforeStmt = insertBeforeStmt->GetPrevStmt(); } // Found the call. Move the statements comprising the assignment. // JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(), asgStmt->GetID(), insertBeforeStmt->GetID()); assert(asgStmt == impLastStmt); do { Statement* movingStmt = impExtractLastStmt(); impInsertStmtBefore(movingStmt, insertBeforeStmt); insertBeforeStmt = movingStmt; } while (impLastStmt != cursor); } } // Create a pointer to the box payload in op1. // op1 = gtNewLclvNode(impBoxTemp, TYP_REF); op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2); // Copy from the exprToBox to the box payload. 
// if (varTypeIsStruct(exprToBox)) { assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls)); op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL); } else { var_types lclTyp = exprToBox->TypeGet(); if (lclTyp == TYP_BYREF) { lclTyp = TYP_I_IMPL; } CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass); if (impIsPrimitive(jitType)) { lclTyp = JITtype2varType(jitType); } var_types srcTyp = exprToBox->TypeGet(); var_types dstTyp = lclTyp; // We allow float <-> double mismatches and implicit truncation for small types. assert((genActualType(srcTyp) == genActualType(dstTyp)) || (varTypeIsFloating(srcTyp) == varTypeIsFloating(dstTyp))); // Note regarding small types. // We are going to store to the box here via an indirection, so the cast added below is // redundant, since the store has an implicit truncation semantic. The reason we still // add this cast is so that the code which deals with GT_BOX optimizations does not have // to account for this implicit truncation (e. g. understand that BOX<byte>(0xFF + 1) is // actually BOX<byte>(0) or deal with signedness mismatch and other GT_CAST complexities). if (srcTyp != dstTyp) { exprToBox = gtNewCastNode(genActualType(dstTyp), exprToBox, false, dstTyp); } op1 = gtNewAssignNode(gtNewOperNode(GT_IND, dstTyp, op1), exprToBox); } // Spill eval stack to flush out any pending side effects. impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox")); // Set up this copy as a second assignment. Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); op1 = gtNewLclvNode(impBoxTemp, TYP_REF); // Record that this is a "box" node and keep track of the matching parts. op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt); // If it is a value class, mark the "box" node. We can use this information // to optimise several cases: // "box(x) == null" --> false // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod" // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod" op1->gtFlags |= GTF_BOX_VALUE; assert(op1->IsBoxedValue()); assert(asg->gtOper == GT_ASG); } else { // Don't optimize, just call the helper and be done with it. JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable"); assert(operCls != nullptr); // Ensure that the value class is restored op2 = impTokenToHandle(pResolvedToken, nullptr, true /* mustRestoreHandle */); if (op2 == nullptr) { // We must be backing out of an inline. assert(compDonotInline()); return; } GenTreeCall::Use* args = gtNewCallArgs(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true)); op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args); } /* Push the result back on the stack, */ /* even if clsHnd is a value class we want the TI_REF */ typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass)); impPushOnStack(op1, tiRetVal); } //------------------------------------------------------------------------ // impImportNewObjArray: Build and import `new` of multi-dimmensional array // // Arguments: // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized // by a call to CEEInfo::resolveToken(). // pCallInfo - The CORINFO_CALL_INFO that has been initialized // by a call to CEEInfo::getCallInfo(). // // Assumptions: // The multi-dimensional array constructor arguments (array dimensions) are // pushed on the IL stack on entry to this method. 
// // Notes: // Multi-dimensional array constructors are imported as calls to a JIT // helper, not as regular calls. void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken); if (classHandle == nullptr) { // compDonotInline() return; } assert(pCallInfo->sig.numArgs); GenTree* node; // Reuse the temp used to pass the array dimensions to avoid bloating // the stack frame in case there are multiple calls to multi-dim array // constructors within a single method. if (lvaNewObjArrayArgs == BAD_VAR_NUM) { lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs")); lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK; lvaTable[lvaNewObjArrayArgs].lvExactSize = 0; } // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers // for our call to CORINFO_HELP_NEW_MDARR. lvaTable[lvaNewObjArrayArgs].lvExactSize = max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32)); // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments // to one allocation at a time. impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray")); // // The arguments of the CORINFO_HELP_NEW_MDARR helper are: // - Array class handle // - Number of dimension arguments // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp. // node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK); node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node); // Pop dimension arguments from the stack one at a time and store it // into lvaNewObjArrayArgs temp. for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--) { GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT); GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK); dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest); dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest, new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i)); dest = gtNewOperNode(GT_IND, TYP_INT, dest); node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node); } GenTreeCall::Use* args = gtNewCallArgs(node); // pass number of arguments to the helper args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args); args = gtPrependNewCallArg(classHandle, args); node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args); for (GenTreeCall::Use& use : node->AsCall()->Args()) { node->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } node->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass; // Remember that this basic block contains 'new' of a md array compCurBB->bbFlags |= BBF_HAS_NEWARRAY; impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass)); } GenTree* Compiler::impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform) { switch (transform) { case CORINFO_DEREF_THIS: { GenTree* obj = thisPtr; // This does a LDIND on the obj, which should be a byref. 
pointing to a ref impBashVarAddrsToI(obj); assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF); CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass); obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj); // ldind could point anywhere, example a boxed class static int obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); return obj; } case CORINFO_BOX_THIS: { // Constraint calls where there might be no // unboxed entry point require us to implement the call via helper. // These only occur when a possible target of the call // may have inherited an implementation of an interface // method from System.Object or System.ValueType. The EE does not provide us with // "unboxed" versions of these methods. GenTree* obj = thisPtr; assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL); obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj); obj->gtFlags |= GTF_EXCEPT; CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass); if (impIsPrimitive(jitTyp)) { if (obj->OperIsBlk()) { obj->ChangeOperUnchecked(GT_IND); // Obj could point anywhere, example a boxed class static int obj->gtFlags |= GTF_IND_TGTANYWHERE; obj->AsOp()->gtOp2 = nullptr; // must be zero for tree walkers } obj->gtType = JITtype2varType(jitTyp); assert(varTypeIsArithmetic(obj->gtType)); } // This pushes on the dereferenced byref // This is then used immediately to box. impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack()); // This pops off the byref-to-a-value-type remaining on the stack and // replaces it with a boxed object. // This is then used as the object to the virtual call immediately below. impImportAndPushBox(pConstrainedResolvedToken); if (compDonotInline()) { return nullptr; } obj = impPopStack().val; return obj; } case CORINFO_NO_THIS_TRANSFORM: default: return thisPtr; } } //------------------------------------------------------------------------ // impCanPInvokeInline: check whether PInvoke inlining should enabled in current method. // // Return Value: // true if PInvoke inlining should be enabled in current method, false otherwise // // Notes: // Checks a number of ambient conditions where we could pinvoke but choose not to bool Compiler::impCanPInvokeInline() { return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke ; } //------------------------------------------------------------------------ // impCanPInvokeInlineCallSite: basic legality checks using information // from a call to see if the call qualifies as an inline pinvoke. // // Arguments: // block - block contaning the call, or for inlinees, block // containing the call being inlined // // Return Value: // true if this call can legally qualify as an inline pinvoke, false otherwise // // Notes: // For runtimes that support exception handling interop there are // restrictions on using inline pinvoke in handler regions. // // * We have to disable pinvoke inlining inside of filters because // in case the main execution (i.e. 
in the try block) is inside
//   unmanaged code, we cannot reuse the inlined stub (we still need
//   the original state until we are in the catch handler)
//
// * We disable pinvoke inlining inside handlers since the GSCookie
//   is in the inlined Frame (see
//   CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
//   this would not protect framelets/return-address of handlers.
//
// These restrictions are currently also in place for CoreCLR but
// can be relaxed when coreclr/#8459 is addressed.
bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
{
    if (block->hasHndIndex())
    {
        return false;
    }

    // The remaining limitations do not apply to CoreRT
    if (IsTargetAbi(CORINFO_CORERT_ABI))
    {
        return true;
    }

#ifdef TARGET_64BIT
    // On 64-bit platforms, we disable pinvoke inlining inside of try regions.
    // Note that this could be needed on other architectures too, but we
    // haven't done enough investigation to know for sure at this point.
    //
    // Here is the comment from JIT64 explaining why:
    //   [VSWhidbey: 611015] - because the jitted code links in the
    //   Frame (instead of the stub) we rely on the Frame not being
    //   'active' until inside the stub.  This normally happens by the
    //   stub setting the return address pointer in the Frame object
    //   inside the stub.  On a normal return, the return address
    //   pointer is zeroed out so the Frame can be safely re-used, but
    //   if an exception occurs, nobody zeros out the return address
    //   pointer.  Thus if we re-used the Frame object, it would go
    //   'active' as soon as we link it into the Frame chain.
    //
    //   Technically we only need to disable PInvoke inlining if we're
    //   in a handler or if we're in a try body with a catch or
    //   filter/except where other non-handler code in this method
    //   might run and try to re-use the dirty Frame object.
    //
    //   A desktop test case where this seems to matter is
    //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
    if (block->hasTryIndex())
    {
        // This does not apply to the raw pinvoke call that is inside the pinvoke
        // ILStub. In this case, we have to inline the raw pinvoke call into the stub,
        // otherwise we would end up with a stub that recursively calls itself, and end
        // up with a stack overflow.
        if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers())
        {
            return true;
        }

        return false;
    }
#endif // TARGET_64BIT

    return true;
}

//------------------------------------------------------------------------
// impCheckForPInvokeCall examines a call to see if it is a pinvoke and, if so,
// whether it can be expressed as an inline pinvoke.
//
// Arguments:
//    call - tree for the call
//    methHnd - handle for the method being called (may be null)
//    sig - signature of the method being called
//    mflags - method flags for the method being called
//    block - block containing the call, or for inlinees, block
//      containing the call being inlined
//
// Notes:
//   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
//
//   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
//   call passes a combination of legality and profitability checks.
// // If GTF_CALL_UNMANAGED is set, increments info.compUnmanagedCallCountWithGCTransition void Compiler::impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block) { CorInfoCallConvExtension unmanagedCallConv; // If VM flagged it as Pinvoke, flag the call node accordingly if ((mflags & CORINFO_FLG_PINVOKE) != 0) { call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE; } bool suppressGCTransition = false; if (methHnd) { if ((mflags & CORINFO_FLG_PINVOKE) == 0) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd, nullptr, &suppressGCTransition); } else { if (sig->getCallConv() == CORINFO_CALLCONV_DEFAULT || sig->getCallConv() == CORINFO_CALLCONV_VARARG) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(nullptr, sig, &suppressGCTransition); assert(!call->gtCallCookie); } if (suppressGCTransition) { call->gtCallMoreFlags |= GTF_CALL_M_SUPPRESS_GC_TRANSITION; } // If we can't get the unmanaged calling convention or the calling convention is unsupported in the JIT, // return here without inlining the native call. if (unmanagedCallConv == CorInfoCallConvExtension::Managed || unmanagedCallConv == CorInfoCallConvExtension::Fastcall || unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction) { return; } optNativeCallCount++; if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI))) { // PInvoke in CoreRT ABI must be always inlined. Non-inlineable CALLI cases have been // converted to regular method calls earlier using convertPInvokeCalliToCall. // PInvoke CALLI in IL stubs must be inlined } else { // Check legality if (!impCanPInvokeInlineCallSite(block)) { return; } // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive // inlining in CoreRT. Skip the ambient conditions checks and profitability checks. if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers()) { // Raw PInvoke call in PInvoke IL stub generated must be inlined to avoid infinite // recursive calls to the stub. } else { if (!impCanPInvokeInline()) { return; } // Size-speed tradeoff: don't use inline pinvoke at rarely // executed call sites. The non-inline version is more // compact. if (block->isRunRarely()) { return; } } } // The expensive check should be last if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig)) { return; } } JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s\n", info.compFullName)); call->gtFlags |= GTF_CALL_UNMANAGED; call->unmgdCallConv = unmanagedCallConv; if (!call->IsSuppressGCTransition()) { info.compUnmanagedCallCountWithGCTransition++; } // AMD64 convention is same for native and managed if (unmanagedCallConv == CorInfoCallConvExtension::C || unmanagedCallConv == CorInfoCallConvExtension::CMemberFunction) { call->gtFlags |= GTF_CALL_POP_ARGS; } if (unmanagedCallConv == CorInfoCallConvExtension::Thiscall) { call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL; } } GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di) { var_types callRetTyp = JITtype2varType(sig->retType); /* The function pointer is on top of the stack - It may be a * complex expression. As it is evaluated after the args, * it may cause registered args to be spilled. Simply spill it. */ // Ignore this trivial case. 
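    // (A function pointer that is already a plain local cannot interfere with the evaluation of
    // the arguments, so only more complex expressions are spilled to a temp here.)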
if (impStackTop().val->gtOper != GT_LCL_VAR) { impSpillStackEntry(verCurrentState.esStackDepth - 1, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall")); } /* Get the function pointer */ GenTree* fptr = impPopStack().val; // The function pointer is typically a sized to match the target pointer size // However, stubgen IL optimization can change LDC.I8 to LDC.I4 // See ILCodeStream::LowerOpcode assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT); #ifdef DEBUG // This temporary must never be converted to a double in stress mode, // because that can introduce a call to the cast helper after the // arguments have already been evaluated. if (fptr->OperGet() == GT_LCL_VAR) { lvaTable[fptr->AsLclVarCommon()->GetLclNum()].lvKeepType = 1; } #endif /* Create the call node */ GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); #ifdef UNIX_X86_ABI call->gtFlags &= ~GTF_CALL_POP_ARGS; #endif return call; } /*****************************************************************************/ void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig) { assert(call->gtFlags & GTF_CALL_UNMANAGED); /* Since we push the arguments in reverse order (i.e. right -> left) * spill any side effects from the stack * * OBS: If there is only one side effect we do not need to spill it * thus we have to spill all side-effects except last one */ unsigned lastLevelWithSideEffects = UINT_MAX; unsigned argsToReverse = sig->numArgs; // For "thiscall", the first argument goes in a register. Since its // order does not need to be changed, we do not need to spill it if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { assert(argsToReverse); argsToReverse--; } #ifndef TARGET_X86 // Don't reverse args on ARM or x64 - first four args always placed in regs in order argsToReverse = 0; #endif for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++) { if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF) { assert(lastLevelWithSideEffects == UINT_MAX); impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect")); } else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) { if (lastLevelWithSideEffects != UINT_MAX) { /* We had a previous side effect - must spill it */ impSpillStackEntry(lastLevelWithSideEffects, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect")); /* Record the level for the current side effect in case we will spill it */ lastLevelWithSideEffects = level; } else { /* This is the first side effect encountered - record its level */ lastLevelWithSideEffects = level; } } } /* The argument list is now "clean" - no out-of-order side effects * Pop the argument list in reverse order */ GenTreeCall::Use* args = impPopReverseCallArgs(sig->numArgs, sig, sig->numArgs - argsToReverse); call->AsCall()->gtCallArgs = args; if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { GenTree* thisPtr = args->GetNode(); impBashVarAddrsToI(thisPtr); assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF); } for (GenTreeCall::Use& argUse : GenTreeCall::UseList(args)) { GenTree* arg = argUse.GetNode(); call->gtFlags |= arg->gtFlags & GTF_GLOB_EFFECT; // We should not be passing gc typed args to an unmanaged call. 
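        // For example (illustrative): an 'object' or 'string' argument reaching this point
        // indicates invalid IL (the assert below fires), whereas byrefs (e.g. from 'ref'/'out'
        // parameters) are common and are simply retyped to native int.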
        if (varTypeIsGC(arg->TypeGet()))
        {
            // Tolerate byrefs by retyping to native int.
            //
            // This is needed or we'll generate inconsistent GC info
            // for this arg at the call site (gc info says byref,
            // pinvoke sig says native int).
            //
            if (arg->TypeGet() == TYP_BYREF)
            {
                arg->ChangeType(TYP_I_IMPL);
            }
            else
            {
                assert(!"*** invalid IL: gc ref passed to unmanaged call");
            }
        }
    }
}

//------------------------------------------------------------------------
// impInitClass: Build a node to initialize the class before accessing the
//               field if necessary
//
// Arguments:
//    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
//                     by a call to CEEInfo::resolveToken().
//
// Return Value: If needed, a pointer to the node that will perform the class
//               initialization. Otherwise, nullptr.
//
GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
    CorInfoInitClassResult initClassResult =
        info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);

    if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
    {
        return nullptr;
    }

    bool runtimeLookup;

    GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);

    if (node == nullptr)
    {
        assert(compDonotInline());
        return nullptr;
    }

    if (runtimeLookup)
    {
        node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(node));
    }
    else
    {
        // Call the shared non gc static helper, as it's the fastest
        node = fgGetSharedCCtor(pResolvedToken->hClass);
    }

    return node;
}

GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
{
    GenTree* op1 = nullptr;

#if defined(DEBUG)
    // If we're replaying under SuperPMI, we're going to read the data stored by SuperPMI and use it
    // for optimization. Unfortunately, SuperPMI doesn't implement a guarantee on the alignment of
    // this data, so for some platforms which don't allow unaligned access (e.g., Linux arm32),
    // this can fault. We should fix SuperPMI to guarantee alignment, but that is a big change.
    // Instead, simply fix up the data here for future use.

    // This variable should be the largest size element, with the largest alignment requirement,
    // and the native C++ compiler should guarantee sufficient alignment.
    double aligned_data   = 0.0;
    void*  p_aligned_data = &aligned_data;
    if (info.compMethodSuperPMIIndex != -1)
    {
        switch (lclTyp)
        {
            case TYP_BOOL:
            case TYP_BYTE:
            case TYP_UBYTE:
                static_assert_no_msg(sizeof(unsigned __int8) == sizeof(bool));
                static_assert_no_msg(sizeof(unsigned __int8) == sizeof(signed char));
                static_assert_no_msg(sizeof(unsigned __int8) == sizeof(unsigned char));
                // No alignment necessary for byte.
break; case TYP_SHORT: case TYP_USHORT: static_assert_no_msg(sizeof(unsigned __int16) == sizeof(short)); static_assert_no_msg(sizeof(unsigned __int16) == sizeof(unsigned short)); if ((size_t)fldAddr % sizeof(unsigned __int16) != 0) { *(unsigned __int16*)p_aligned_data = GET_UNALIGNED_16(fldAddr); fldAddr = p_aligned_data; } break; case TYP_INT: case TYP_UINT: case TYP_FLOAT: static_assert_no_msg(sizeof(unsigned __int32) == sizeof(int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(unsigned int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(float)); if ((size_t)fldAddr % sizeof(unsigned __int32) != 0) { *(unsigned __int32*)p_aligned_data = GET_UNALIGNED_32(fldAddr); fldAddr = p_aligned_data; } break; case TYP_LONG: case TYP_ULONG: case TYP_DOUBLE: static_assert_no_msg(sizeof(unsigned __int64) == sizeof(__int64)); static_assert_no_msg(sizeof(unsigned __int64) == sizeof(double)); if ((size_t)fldAddr % sizeof(unsigned __int64) != 0) { *(unsigned __int64*)p_aligned_data = GET_UNALIGNED_64(fldAddr); fldAddr = p_aligned_data; } break; default: assert(!"Unexpected lclTyp"); break; } } #endif // DEBUG switch (lclTyp) { int ival; __int64 lval; double dval; case TYP_BOOL: ival = *((bool*)fldAddr); goto IVAL_COMMON; case TYP_BYTE: ival = *((signed char*)fldAddr); goto IVAL_COMMON; case TYP_UBYTE: ival = *((unsigned char*)fldAddr); goto IVAL_COMMON; case TYP_SHORT: ival = *((short*)fldAddr); goto IVAL_COMMON; case TYP_USHORT: ival = *((unsigned short*)fldAddr); goto IVAL_COMMON; case TYP_UINT: case TYP_INT: ival = *((int*)fldAddr); IVAL_COMMON: op1 = gtNewIconNode(ival); break; case TYP_LONG: case TYP_ULONG: lval = *((__int64*)fldAddr); op1 = gtNewLconNode(lval); break; case TYP_FLOAT: dval = *((float*)fldAddr); op1 = gtNewDconNode(dval); op1->gtType = TYP_FLOAT; break; case TYP_DOUBLE: dval = *((double*)fldAddr); op1 = gtNewDconNode(dval); break; default: assert(!"Unexpected lclTyp"); break; } return op1; } GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp) { // Ordinary static fields never overlap. RVA statics, however, can overlap (if they're // mapped to the same ".data" declaration). That said, such mappings only appear to be // possible with ILASM, and in ILASM-produced (ILONLY) images, RVA statics are always // read-only (using "stsfld" on them is UB). In mixed-mode assemblies, RVA statics can // be mutable, but the only current producer of such images, the C++/CLI compiler, does // not appear to support mapping different fields to the same address. So we will say // that "mutable overlapping RVA statics" are UB as well. If this ever changes, code in // morph and value numbering will need to be updated to respect "gtFldMayOverlap" and // "NotAField FldSeq". // For statics that are not "boxed", the initial address tree will contain the field sequence. // For those that are, we will attach it later, when adding the indirection for the box, since // that tree will represent the true address. bool isBoxedStatic = (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) != 0; FieldSeqNode* innerFldSeq = !isBoxedStatic ? 
GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField) : FieldSeqStore::NotAField(); GenTree* op1; switch (pFieldInfo->fieldAccessor) { case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: { assert(!compIsForInlining()); // We first call a special helper to get the statics base pointer op1 = impParentClassTokenToHandle(pResolvedToken); // compIsForInlining() is false so we should not get NULL here assert(op1 != nullptr); var_types type = TYP_BYREF; switch (pFieldInfo->helper) { case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE: type = TYP_I_IMPL; break; case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE: break; default: assert(!"unknown generic statics helper"); break; } op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewCallArgs(op1)); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); } break; case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); } else #endif { op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper); } op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); break; } case CORINFO_FIELD_STATIC_READYTORUN_HELPER: { #ifdef FEATURE_READYTORUN assert(opts.IsReadyToRun()); assert(!compIsForInlining()); CORINFO_LOOKUP_KIND kind; info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind); assert(kind.needsRuntimeLookup); GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind); GenTreeCall::Use* args = gtNewCallArgs(ctxTree); GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } var_types type = TYP_BYREF; op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); #else unreached(); #endif // FEATURE_READYTORUN } break; default: { // Do we need the address of a static field? // if (access & CORINFO_ACCESS_ADDRESS) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr); // We should always be able to access this static's address directly. assert(pFldAddr == nullptr); // Create the address node. GenTreeFlags handleKind = isBoxedStatic ? GTF_ICON_STATIC_BOX_PTR : GTF_ICON_STATIC_HDL; op1 = gtNewIconHandleNode((size_t)fldAddr, handleKind, innerFldSeq); #ifdef DEBUG op1->AsIntCon()->gtTargetHandle = op1->AsIntCon()->gtIconVal; #endif if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_ICON_INITCLASS; } } else // We need the value of a static field { // In future, it may be better to just create the right tree here instead of folding it later. 
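                // Note: for a boxed static the field lives inside a boxed object on the GC heap; the
                // extra indirection plus TARGET_POINTER_SIZE offset added below skips the box's method
                // table pointer to reach the actual value.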
op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField); if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_FLD_INITCLASS; } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1->ChangeType(TYP_REF); // points at boxed object op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING; } } return op1; } break; } } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); } if (!(access & CORINFO_ACCESS_ADDRESS)) { if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF; } } return op1; } // In general try to call this before most of the verification work. Most people expect the access // exceptions before the verification exceptions. If you do this after, that usually doesn't happen. Turns // out if you can't access something we also think that you're unverifiable for other reasons. void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { if (result != CORINFO_ACCESS_ALLOWED) { impHandleAccessAllowedInternal(result, helperCall); } } void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { switch (result) { case CORINFO_ACCESS_ALLOWED: break; case CORINFO_ACCESS_ILLEGAL: // if we're verifying, then we need to reject the illegal access to ensure that we don't think the // method is verifiable. Otherwise, delay the exception to runtime. 
if (compIsForImportOnly()) { info.compCompHnd->ThrowExceptionForHelper(helperCall); } else { impInsertHelperCall(helperCall); } break; } } void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo) { // Construct the argument list GenTreeCall::Use* args = nullptr; assert(helperInfo->helperNum != CORINFO_HELP_UNDEF); for (unsigned i = helperInfo->numArgs; i > 0; --i) { const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1]; GenTree* currentArg = nullptr; switch (helperArg.argType) { case CORINFO_HELPER_ARG_TYPE_Field: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun( info.compCompHnd->getFieldClass(helperArg.fieldHandle)); currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle); break; case CORINFO_HELPER_ARG_TYPE_Method: info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle); currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle); break; case CORINFO_HELPER_ARG_TYPE_Class: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle); currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle); break; case CORINFO_HELPER_ARG_TYPE_Module: currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle); break; case CORINFO_HELPER_ARG_TYPE_Const: currentArg = gtNewIconNode(helperArg.constant); break; default: NO_WAY("Illegal helper arg type"); } args = gtPrependNewCallArg(currentArg, args); } /* TODO-Review: * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee. * Also, consider sticking this in the first basic block. */ GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args); impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } //------------------------------------------------------------------------ // impTailCallRetTypeCompatible: Checks whether the return types of caller // and callee are compatible so that calle can be tail called. // sizes are not supported integral type sizes return values to temps. // // Arguments: // allowWidening -- whether to allow implicit widening by the callee. // For instance, allowing int32 -> int16 tailcalls. // The managed calling convention allows this, but // we don't want explicit tailcalls to depend on this // detail of the managed calling convention. // callerRetType -- the caller's return type // callerRetTypeClass - the caller's return struct type // callerCallConv -- calling convention of the caller // calleeRetType -- the callee's return type // calleeRetTypeClass - the callee return struct type // calleeCallConv -- calling convention of the callee // // Returns: // True if the tailcall types are compatible. // // Remarks: // Note that here we don't check compatibility in IL Verifier sense, but on the // lines of return types getting returned in the same return register. bool Compiler::impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv) { // Early out if the types are the same. if (callerRetType == calleeRetType) { return true; } // For integral types the managed calling convention dictates that callee // will widen the return value to 4 bytes, so we can allow implicit widening // in managed to managed tailcalls when dealing with <= 4 bytes. 
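    // For example, a managed callee declared to return int16 widens its result to 32 bits in the
    // return register, so an implicit tail call from a caller returning int32 is safe to allow here.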
    bool isManaged = (callerCallConv == CorInfoCallConvExtension::Managed) &&
                     (calleeCallConv == CorInfoCallConvExtension::Managed);

    if (allowWidening && isManaged && varTypeIsIntegral(callerRetType) && varTypeIsIntegral(calleeRetType) &&
        (genTypeSize(callerRetType) <= 4) && (genTypeSize(calleeRetType) <= genTypeSize(callerRetType)))
    {
        return true;
    }

    // If the class handles are the same and not null, the return types are compatible.
    if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
    {
        return true;
    }

#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
    // Jit64 compat:
    if (callerRetType == TYP_VOID)
    {
        // This needs to be allowed to support the following IL pattern that Jit64 allows:
        //     tail.call
        //     pop
        //     ret
        //
        // Note that the above IL pattern is not valid as per IL verification rules.
        // Therefore, only full trust code can take advantage of this pattern.
        return true;
    }

    // These checks return true if the return value type sizes are the same and
    // get returned in the same return register i.e. caller doesn't need to normalize
    // return value. Some of the tail calls permitted by below checks would have
    // been rejected by IL Verifier before we reached here. Therefore, only full
    // trust code can make those tail calls.
    unsigned callerRetTypeSize = 0;
    unsigned calleeRetTypeSize = 0;

    bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize,
                                                               true, info.compIsVarArgs, callerCallConv);
    bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize,
                                                               true, info.compIsVarArgs, calleeCallConv);

    if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
    {
        return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
    }
#endif // TARGET_AMD64 || TARGET_ARM64

    return false;
}

/********************************************************************************
 *
 * Returns true if the current opcode and the opcodes following it correspond
 * to a supported tail call IL pattern.
 *
 */
bool Compiler::impIsTailCallILPattern(
    bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive)
{
    // Bail out if the current opcode is not a call.
    if (!impOpcodeIsCallOpcode(curOpcode))
    {
        return false;
    }

#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
    // If shared ret tail opt is not enabled, we will enable
    // it for recursive methods.
    if (isRecursive)
#endif
    {
        // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only
        // part of the sequence. Make sure we don't go past the end of the IL however.
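        // Extend the scan limit by one opcode (clamped to the end of the method's IL) so that a ret
        // which starts the fallthrough block is still visible to the check below.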
codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize); } // Bail out if there is no next opcode after call if (codeAddrOfNextOpcode >= codeEnd) { return false; } OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode); return (nextOpcode == CEE_RET); } /***************************************************************************** * * Determine whether the call could be converted to an implicit tail call * */ bool Compiler::impIsImplicitTailCallCandidate( OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive) { #if FEATURE_TAILCALL_OPT if (!opts.compTailCallOpt) { return false; } if (opts.OptimizationDisabled()) { return false; } // must not be tail prefixed if (prefixFlags & PREFIX_TAILCALL_EXPLICIT) { return false; } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // the block containing call is marked as BBJ_RETURN // We allow shared ret tail call optimization on recursive calls even under // !FEATURE_TAILCALL_OPT_SHARED_RETURN. if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN)) return false; #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN // must be call+ret or call+pop+ret if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive)) { return false; } return true; #else return false; #endif // FEATURE_TAILCALL_OPT } //------------------------------------------------------------------------ // impImportCall: import a call-inspiring opcode // // Arguments: // opcode - opcode that inspires the call // pResolvedToken - resolved token for the call target // pConstrainedResolvedToken - resolved constraint token (or nullptr) // newObjThis - tree for this pointer or uninitalized newobj temp (or nullptr) // prefixFlags - IL prefix flags for the call // callInfo - EE supplied info for the call // rawILOffset - IL offset of the opcode, used for guarded devirtualization. // // Returns: // Type of the call's return value. // If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF. // However we can't assert for this here yet because there are cases we miss. See issue #13272. // // // Notes: // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ. // // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated // uninitalized object. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif var_types Compiler::impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset) { assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI); // The current statement DI may not refer to the exact call, but for calls // we wish to be able to attach the exact IL instruction to get "return // value" support in the debugger, so create one with the exact IL offset. 
DebugInfo di = impCreateDIWithCurrentStackInfo(rawILOffset, true); var_types callRetTyp = TYP_COUNT; CORINFO_SIG_INFO* sig = nullptr; CORINFO_METHOD_HANDLE methHnd = nullptr; CORINFO_CLASS_HANDLE clsHnd = nullptr; unsigned clsFlags = 0; unsigned mflags = 0; GenTree* call = nullptr; GenTreeCall::Use* args = nullptr; CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM; CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr; bool exactContextNeedsRuntimeLookup = false; bool canTailCall = true; const char* szCanTailCallFailReason = nullptr; const int tailCallFlags = (prefixFlags & PREFIX_TAILCALL); const bool isReadonlyCall = (prefixFlags & PREFIX_READONLY) != 0; CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr; // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could // do that before tailcalls, but that is probably not the intended // semantic. So just disallow tailcalls from synchronized methods. // Also, popping arguments in a varargs function is more work and NYI // If we have a security object, we have to keep our frame around for callers // to see any imperative security. // Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT // at the end, so tailcalls should be disabled. if (info.compFlags & CORINFO_FLG_SYNCH) { canTailCall = false; szCanTailCallFailReason = "Caller is synchronized"; } else if (opts.IsReversePInvoke()) { canTailCall = false; szCanTailCallFailReason = "Caller is Reverse P/Invoke"; } #if !FEATURE_FIXED_OUT_ARGS else if (info.compIsVarArgs) { canTailCall = false; szCanTailCallFailReason = "Caller is varargs"; } #endif // FEATURE_FIXED_OUT_ARGS // We only need to cast the return value of pinvoke inlined calls that return small types // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there. // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for // the time being that the callee might be compiled by the other JIT and thus the return // value will need to be widened by us (or not widened at all...) // ReadyToRun code sticks with default calling convention that does not widen small return types. bool checkForSmallType = opts.IsReadyToRun(); bool bIntrinsicImported = false; CORINFO_SIG_INFO calliSig; GenTreeCall::Use* extraArg = nullptr; /*------------------------------------------------------------------------- * First create the call node */ if (opcode == CEE_CALLI) { if (IsTargetAbi(CORINFO_CORERT_ABI)) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block))) { eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo); return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset); } } /* Get the call site sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig); callRetTyp = JITtype2varType(calliSig.retType); call = impImportIndirectCall(&calliSig, di); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? 
info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif sig = &calliSig; } else // (opcode != CEE_CALLI) { NamedIntrinsic ni = NI_Illegal; // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to // supply the instantiation parameters necessary to make direct calls to underlying // shared generic code, rather than calling through instantiating stubs. If the // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT // must indeed pass an instantiation parameter. methHnd = callInfo->hMethod; sig = &(callInfo->sig); callRetTyp = JITtype2varType(sig->retType); mflags = callInfo->methodFlags; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif if (compIsForInlining()) { /* Does the inlinee use StackCrawlMark */ if (mflags & CORINFO_FLG_DONT_INLINE_CALLER) { compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK); return TYP_UNDEF; } /* For now ignore varargs */ if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS); return TYP_UNDEF; } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return TYP_UNDEF; } if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT)) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL); return TYP_UNDEF; } } clsHnd = pResolvedToken->hClass; clsFlags = callInfo->classFlags; #ifdef DEBUG // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute. // This recognition should really be done by knowing the methHnd of the relevant Mark method(s). // These should be in corelib.h, and available through a JIT/EE interface call. 
const char* modName; const char* className; const char* methodName; if ((className = eeGetClassName(clsHnd)) != nullptr && strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 && (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0) { return impImportJitTestLabelMark(sig->numArgs); } #endif // DEBUG // <NICE> Factor this into getCallInfo </NICE> bool isSpecialIntrinsic = false; if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_INTRINSIC)) != 0) { const bool isTailCall = canTailCall && (tailCallFlags != 0); call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, isReadonlyCall, isTailCall, pConstrainedResolvedToken, callInfo->thisTransform, &ni, &isSpecialIntrinsic); if (compDonotInline()) { return TYP_UNDEF; } if (call != nullptr) { #ifdef FEATURE_READYTORUN if (call->OperGet() == GT_INTRINSIC) { if (opts.IsReadyToRun()) { noway_assert(callInfo->kind == CORINFO_CALL); call->AsIntrinsic()->gtEntryPoint = callInfo->codePointerLookup.constLookup; } else { call->AsIntrinsic()->gtEntryPoint.addr = nullptr; call->AsIntrinsic()->gtEntryPoint.accessType = IAT_VALUE; } } #endif bIntrinsicImported = true; goto DONE_CALL; } } #ifdef FEATURE_SIMD if (featureSIMD) { call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token); if (call != nullptr) { bIntrinsicImported = true; goto DONE_CALL; } } #endif // FEATURE_SIMD if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG) { BADCODE("Bad calling convention"); } //------------------------------------------------------------------------- // Construct the call node // // Work out what sort of call we're making. // Dispense with virtual calls implemented via LDVIRTFTN immediately. constraintCallThisTransform = callInfo->thisTransform; exactContextHnd = callInfo->contextHandle; exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup; switch (callInfo->kind) { case CORINFO_VIRTUALCALL_STUB: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); if (callInfo->stubLookup.lookupKind.needsRuntimeLookup) { if (callInfo->stubLookup.lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE); return TYP_UNDEF; } GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd); assert(!compDonotInline()); // This is the rough code to set up an indirect stub call assert(stubAddr != nullptr); // The stubAddr may be a // complex expression. As it is evaluated after the args, // it may cause registered args to be spilled. Simply spill it. 
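                        // Spill the runtime-looked-up stub address into a temp so the indirect stub
                        // call created below sees a simple GT_LCL_VAR as its target.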
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup")); impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE); stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr); call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT); call->gtFlags |= GTF_CALL_VIRT_STUB; #ifdef TARGET_X86 // No tailcalls allowed for these yet... canTailCall = false; szCanTailCallFailReason = "VirtualCall with runtime lookup"; #endif } else { // The stub address is known at compile time call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->AsCall()->gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr; call->gtFlags |= GTF_CALL_VIRT_STUB; assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE && callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE); if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT; } } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is sometimes needed for ready to run to handle // non-virtual <-> virtual changes between versions if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } } #endif break; } case CORINFO_VIRTUALCALL_VTABLE: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->gtFlags |= GTF_CALL_VIRT_VTABLE; // Should we expand virtual call targets early for this method? // if (opts.compExpandCallsEarly) { // Mark this method to expand the virtual call target early in fgMorpgCall call->AsCall()->SetExpandedEarly(); } break; } case CORINFO_VIRTUALCALL_LDVIRTFTN: { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN); return TYP_UNDEF; } assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); // OK, We've been told to call via LDVIRTFTN, so just // take the call now.... 
                GenTreeCall::Use* args = impPopCallArgs(sig->numArgs, sig);

                GenTree* thisPtr = impPopStack().val;
                thisPtr          = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
                assert(thisPtr != nullptr);

                // Clone the (possibly transformed) "this" pointer
                GenTree* thisPtrCopy;
                thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                       nullptr DEBUGARG("LDVIRTFTN this pointer"));

                GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
                assert(fptr != nullptr);

                thisPtr = nullptr; // can't reuse it

                // Now make an indirect call through the function pointer

                unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
                impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
                fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);

                // Create the actual call node

                call                          = gtNewIndCallNode(fptr, callRetTyp, args, di);
                call->AsCall()->gtCallThisArg = gtNewCallArgs(thisPtrCopy);
                call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);

                if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
                {
                    // CoreRT generic virtual method: need to handle potential fat function pointers
                    addFatPointerCandidate(call->AsCall());
                }
#ifdef FEATURE_READYTORUN
                if (opts.IsReadyToRun())
                {
                    // Null check is needed for ready to run to handle
                    // non-virtual <-> virtual changes between versions
                    call->gtFlags |= GTF_CALL_NULLCHECK;
                }
#endif

                // Since we are jumping over some code, check that it's OK to skip that code
                assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
                       (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
                goto DONE;
            }

            case CORINFO_CALL:
            {
                // This is for a non-virtual, non-interface etc. call
                call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di);

                // We remove the nullcheck for the GetType call intrinsic.
                // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
                // and intrinsics.
                if (callInfo->nullInstanceCheck &&
                    !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (ni == NI_System_Object_GetType)))
                {
                    call->gtFlags |= GTF_CALL_NULLCHECK;
                }

#ifdef FEATURE_READYTORUN
                if (opts.IsReadyToRun())
                {
                    call->AsCall()->setEntryPoint(callInfo->codePointerLookup.constLookup);
                }
#endif
                break;
            }

            case CORINFO_CALL_CODE_POINTER:
            {
                // The EE has asked us to call by computing a code pointer and then doing an
                // indirect call. This is because a runtime lookup is required to get the code entry point.

                // These calls always follow a uniform calling convention, i.e.
no extra hidden params assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); GenTree* fptr = impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod); if (compDonotInline()) { return TYP_UNDEF; } // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } break; } default: assert(!"unknown call kind"); break; } //------------------------------------------------------------------------- // Set more flags PREFIX_ASSUME(call != nullptr); if (mflags & CORINFO_FLG_NOGCCHECK) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK; } // Mark call if it's one of the ones we will maybe treat as an intrinsic if (isSpecialIntrinsic) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC; } } assert(sig); assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set. /* Some sanity checks */ // CALL_VIRT and NEWOBJ must have a THIS pointer assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS)); // static bit and hasThis are negations of one another assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0)); assert(call != nullptr); /*------------------------------------------------------------------------- * Check special-cases etc */ /* Special case - Check if it is a call to Delegate.Invoke(). */ if (mflags & CORINFO_FLG_DELEGATE_INVOKE) { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(mflags & CORINFO_FLG_FINAL); /* Set the delegate flag */ call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV; if (callInfo->wrapperDelegateInvoke) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV; } if (opcode == CEE_CALLVIRT) { assert(mflags & CORINFO_FLG_FINAL); /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */ assert(call->gtFlags & GTF_CALL_NULLCHECK); call->gtFlags &= ~GTF_CALL_NULLCHECK; } } CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass; actualMethodRetTypeSigClass = sig->retTypeSigClass; /* Check for varargs */ if (!compFeatureVarArg() && ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)) { BADCODE("Varargs not supported."); } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { assert(!compIsForInlining()); /* Set the right flags */ call->gtFlags |= GTF_CALL_POP_ARGS; call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VARARGS; /* Can't allow tailcall for varargs as it is caller-pop. The caller will be expecting to pop a certain number of arguments, but if we tailcall to a function with a different number of arguments, we are hosed. There are ways around this (caller remembers esp value, varargs is not caller-pop, etc), but not worth it. 
*/ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is varargs"; } #endif /* Get the total number of arguments - this is already correct * for CALLI - for methods we have to get it from the call site */ if (opcode != CEE_CALLI) { #ifdef DEBUG unsigned numArgsDef = sig->numArgs; #endif eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); // For vararg calls we must be sure to load the return type of the // method actually being called, as well as the return types of the // specified in the vararg signature. With type equivalency, these types // may not be the same. if (sig->retTypeSigClass != actualMethodRetTypeSigClass) { if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggerred from the prestub of this methods, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass); } } assert(numArgsDef <= sig->numArgs); } /* We will have "cookie" as the last argument but we cannot push * it on the operand stack because we may overflow, so we append it * to the arg list next after we pop them */ } //--------------------------- Inline NDirect ------------------------------ // For inline cases we technically should look at both the current // block and the call site block (or just the latter if we've // fused the EH trees). However the block-related checks pertain to // EH and we currently won't inline a method with EH. So for // inlinees, just checking the call site block is sufficient. { // New lexical block here to avoid compilation errors because of GOTOs. BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block); } #ifdef UNIX_X86_ABI // On Unix x86 we use caller-cleaned convention. if ((call->gtFlags & GTF_CALL_UNMANAGED) == 0) call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI if (call->gtFlags & GTF_CALL_UNMANAGED) { // We set up the unmanaged call by linking the frame, disabling GC, etc // This needs to be cleaned up on return. // In addition, native calls have different normalization rules than managed code // (managed calling convention always widens return values in the callee) if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is native"; } checkForSmallType = true; impPopArgsForUnmanagedCall(call, sig); goto DONE; } else if ((opcode == CEE_CALLI) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG)) { if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig)) { // Normally this only happens with inlining. // However, a generic method (or type) being NGENd into another module // can run into this issue as well. There's not an easy fall-back for NGEN // so instead we fallback to JIT. 
if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE); } else { IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)"); } return TYP_UNDEF; } GenTree* cookie = eeGetPInvokeCookie(sig); // This cookie is required to be either a simple GT_CNS_INT or // an indirection of a GT_CNS_INT // GenTree* cookieConst = cookie; if (cookie->gtOper == GT_IND) { cookieConst = cookie->AsOp()->gtOp1; } assert(cookieConst->gtOper == GT_CNS_INT); // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that // we won't allow this tree to participate in any CSE logic // cookie->gtFlags |= GTF_DONT_CSE; cookieConst->gtFlags |= GTF_DONT_CSE; call->AsCall()->gtCallCookie = cookie; if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "PInvoke calli"; } } /*------------------------------------------------------------------------- * Create the argument list */ //------------------------------------------------------------------------- // Special case - for varargs we have an implicit last argument if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { assert(!compIsForInlining()); void *varCookie, *pVarCookie; if (!info.compCompHnd->canGetVarArgsHandle(sig)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE); return TYP_UNDEF; } varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie); assert((!varCookie) != (!pVarCookie)); GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig); assert(extraArg == nullptr); extraArg = gtNewCallArgs(cookie); } //------------------------------------------------------------------------- // Extra arg for shared generic code and array methods // // Extra argument containing instantiation information is passed in the // following circumstances: // (a) To the "Address" method on array classes; the extra parameter is // the array's type handle (a TypeDesc) // (b) To shared-code instance methods in generic structs; the extra parameter // is the struct's type handle (a vtable ptr) // (c) To shared-code per-instantiation non-generic static methods in generic // classes and structs; the extra parameter is the type handle // (d) To shared-code generic methods; the extra parameter is an // exact-instantiation MethodDesc // // We also set the exact type context associated with the call so we can // inline the call correctly later on. 
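    // For example (illustrative only): a call to a shared-code static method on a generic class
    // instantiated over a reference type passes the exact class handle (case (c) above) so the
    // callee can recover its type arguments and statics.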
if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE) { assert(call->AsCall()->gtCallType == CT_USER_FUNC); if (clsHnd == nullptr) { NO_WAY("CALLI on parameterized type"); } assert(opcode != CEE_CALLI); GenTree* instParam; bool runtimeLookup; // Instantiated generic method if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD) { assert(exactContextHnd != METHOD_BEING_COMPILED_CONTEXT()); CORINFO_METHOD_HANDLE exactMethodHandle = (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK); if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbMethHndNode(exactMethodHandle); info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle); } } else { instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } // otherwise must be an instance method in a generic struct, // a static method in a generic type, or a runtime-generated array method else { assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); CORINFO_CLASS_HANDLE exactClassHandle = eeGetClassFromContext(exactContextHnd); if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD); return TYP_UNDEF; } if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall) { // We indicate "readonly" to the Address operation by using a null // instParam. instParam = gtNewIconNode(0, TYP_REF); } else if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbClsHndNode(exactClassHandle); info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle); } } else { instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } assert(extraArg == nullptr); extraArg = gtNewCallArgs(instParam); } if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0)) { // Only verifiable cases are supported. // dup; ldvirtftn; newobj; or ldftn; newobj. // IL test could contain unverifiable sequence, in this case optimization should not be done. 
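        // If the ldftn/ldvirtftn that produced the delegate target left its token in the stack entry's
        // type info, remember it so fgOptimizeDelegateConstructor can use it below.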
if (impStackHeight() > 0) { typeInfo delegateTypeInfo = impStackTop().seTypeInfo; if (delegateTypeInfo.IsToken()) { ldftnToken = delegateTypeInfo.GetToken(); } } } //------------------------------------------------------------------------- // The main group of arguments args = impPopCallArgs(sig->numArgs, sig, extraArg); call->AsCall()->gtCallArgs = args; for (GenTreeCall::Use& use : call->AsCall()->Args()) { call->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } //------------------------------------------------------------------------- // The "this" pointer if (((mflags & CORINFO_FLG_STATIC) == 0) && ((sig->callConv & CORINFO_CALLCONV_EXPLICITTHIS) == 0) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr))) { GenTree* obj; if (opcode == CEE_NEWOBJ) { obj = newobjThis; } else { obj = impPopStack().val; obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform); if (compDonotInline()) { return TYP_UNDEF; } } // Store the "this" value in the call call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT; call->AsCall()->gtCallThisArg = gtNewCallArgs(obj); // Is this a virtual or interface call? if (call->AsCall()->IsVirtual()) { // only true object pointers can be virtual assert(obj->gtType == TYP_REF); // See if we can devirtualize. const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isLateDevirtualization = false; impDevirtualizeCall(call->AsCall(), pResolvedToken, &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle, &exactContextHnd, isLateDevirtualization, isExplicitTailCall, // Take care to pass raw IL offset here as the 'debug info' might be different for // inlinees. rawILOffset); // Devirtualization may change which method gets invoked. Update our local cache. // methHnd = callInfo->hMethod; } if (impIsThis(obj)) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS; } } //------------------------------------------------------------------------- // The "this" pointer for "newobj" if (opcode == CEE_NEWOBJ) { if (clsFlags & CORINFO_FLG_VAROBJSIZE) { assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately // This is a 'new' of a variable sized object, wher // the constructor is to return the object. In this case // the constructor claims to return VOID but we know it // actually returns the new object assert(callRetTyp == TYP_VOID); callRetTyp = TYP_REF; call->gtType = TYP_REF; impSpillSpecialSideEff(); impPushOnStack(call, typeInfo(TI_REF, clsHnd)); } else { if (clsFlags & CORINFO_FLG_DELEGATE) { // New inliner morph it in impImportCall. // This will allow us to inline the call to the delegate constructor. call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken); } if (!bIntrinsicImported) { #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // append the call node. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Now push the value of the 'new onto the stack // This is a 'new' of a non-variable sized object. // Append the new node (op1) to the statement list, // and then push the local holding the value of this // new instruction on the stack. 
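                // For value classes newobjThis is the address of the temp (GT_ADDR of a local), so push
                // the underlying local; for reference types push the local that holds the new object.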
if (clsFlags & CORINFO_FLG_VALUECLASS) { assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR); unsigned tmp = newobjThis->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack()); } else { if (newobjThis->gtOper == GT_COMMA) { // We must have inserted the callout. Get the real newobj. newobjThis = newobjThis->AsOp()->gtOp2; } assert(newobjThis->gtOper == GT_LCL_VAR); impPushOnStack(gtNewLclvNode(newobjThis->AsLclVarCommon()->GetLclNum(), TYP_REF), typeInfo(TI_REF, clsHnd)); } } return callRetTyp; } DONE: #ifdef DEBUG // In debug we want to be able to register callsites with the EE. assert(call->AsCall()->callSig == nullptr); call->AsCall()->callSig = new (this, CMK_Generic) CORINFO_SIG_INFO; *call->AsCall()->callSig = *sig; #endif // Final importer checks for calls flagged as tail calls. // if (tailCallFlags != 0) { const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isImplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_IMPLICIT) != 0; const bool isStressTailCall = (tailCallFlags & PREFIX_TAILCALL_STRESS) != 0; // Exactly one of these should be true. assert(isExplicitTailCall != isImplicitTailCall); // This check cannot be performed for implicit tail calls for the reason // that impIsImplicitTailCallCandidate() is not checking whether return // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT. // As a result it is possible that in the following case, we find that // the type stack is non-empty if Callee() is considered for implicit // tail calling. // int Caller(..) { .... void Callee(); ret val; ... } // // Note that we cannot check return type compatibility before ImpImportCall() // as we don't have required info or need to duplicate some of the logic of // ImpImportCall(). // // For implicit tail calls, we perform this check after return types are // known to be compatible. if (isExplicitTailCall && (verCurrentState.esStackDepth != 0)) { BADCODE("Stack should be empty after tailcall"); } // For opportunistic tailcalls we allow implicit widening, i.e. tailcalls from int32 -> int16, since the // managed calling convention dictates that the callee widens the value. For explicit tailcalls we don't // want to require this detail of the calling convention to bubble up to the tailcall helpers bool allowWidening = isImplicitTailCall; if (canTailCall && !impTailCallRetTypeCompatible(allowWidening, info.compRetType, info.compMethodInfo->args.retTypeClass, info.compCallConv, callRetTyp, sig->retTypeClass, call->AsCall()->GetUnmanagedCallConv())) { canTailCall = false; szCanTailCallFailReason = "Return types are not tail call compatible"; } // Stack empty check for implicit tail calls. if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0)) { #ifdef TARGET_AMD64 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException // in JIT64, not an InvalidProgramException. Verify(false, "Stack should be empty after tailcall"); #else // TARGET_64BIT BADCODE("Stack should be empty after tailcall"); #endif //! TARGET_64BIT } // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN); // Ask VM for permission to tailcall if (canTailCall) { // True virtual or indirect calls, shouldn't pass in a callee handle. 
CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->AsCall()->gtCallType != CT_USER_FUNC) || call->AsCall()->IsVirtual()) ? nullptr : methHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, isExplicitTailCall)) { if (isExplicitTailCall) { // In case of explicit tail calls, mark it so that it is not considered // for in-lining. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_EXPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); if (isStressTailCall) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_STRESS_TAILCALL; JITDUMP("\nGTF_CALL_M_STRESS_TAILCALL set for call [%06u]\n", dspTreeID(call)); } } else { #if FEATURE_TAILCALL_OPT // Must be an implicit tail call. assert(isImplicitTailCall); // It is possible that a call node is both an inline candidate and marked // for opportunistic tail calling. In-lining happens before morhphing of // trees. If in-lining of an in-line candidate gets aborted for whatever // reason, it will survive to the morphing stage at which point it will be // transformed into a tail call after performing additional checks. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_IMPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); #else //! FEATURE_TAILCALL_OPT NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls"); #endif // FEATURE_TAILCALL_OPT } // This might or might not turn into a tailcall. We do more // checks in morph. For explicit tailcalls we need more // information in morph in case it turns out to be a // helper-based tailcall. if (isExplicitTailCall) { assert(call->AsCall()->tailCallInfo == nullptr); call->AsCall()->tailCallInfo = new (this, CMK_CorTailCallInfo) TailCallSiteInfo; switch (opcode) { case CEE_CALLI: call->AsCall()->tailCallInfo->SetCalli(sig); break; case CEE_CALLVIRT: call->AsCall()->tailCallInfo->SetCallvirt(sig, pResolvedToken); break; default: call->AsCall()->tailCallInfo->SetCall(sig, pResolvedToken); break; } } } else { // canTailCall reported its reasons already canTailCall = false; JITDUMP("\ninfo.compCompHnd->canTailCall returned false for call [%06u]\n", dspTreeID(call)); } } else { // If this assert fires it means that canTailCall was set to false without setting a reason! assert(szCanTailCallFailReason != nullptr); JITDUMP("\nRejecting %splicit tail call for [%06u], reason: '%s'\n", isExplicitTailCall ? "ex" : "im", dspTreeID(call), szCanTailCallFailReason); info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, isExplicitTailCall, TAILCALL_FAIL, szCanTailCallFailReason); } } // Note: we assume that small return types are already normalized by the managed callee // or by the pinvoke stub for calls to unmanaged code. if (!bIntrinsicImported) { // // Things needed to be checked when bIntrinsicImported is false. // assert(call->gtOper == GT_CALL); assert(callInfo != nullptr); if (compIsForInlining() && opcode == CEE_CALLVIRT) { GenTree* callObj = call->AsCall()->gtCallThisArg->GetNode(); if ((call->AsCall()->IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, call->AsCall()->gtCallArgs, callObj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? 
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // Extra checks for tail calls and tail recursion. // // A tail recursive call is a potential loop from the current block to the start of the root method. // If we see a tail recursive call, mark the blocks from the call site back to the entry as potentially // being in a loop. // // Note: if we're importing an inlinee we don't mark the right set of blocks, but by then it's too // late. Currently this doesn't lead to problems. See GitHub issue 33529. // // OSR also needs to handle tail calls specially: // * block profiling in OSR methods needs to ensure probes happen before tail calls, not after. // * the root method entry must be imported if there's a recursive tail call or a potentially // inlineable tail call. // if ((tailCallFlags != 0) && canTailCall) { if (gtIsRecursiveCall(methHnd)) { assert(verCurrentState.esStackDepth == 0); BasicBlock* loopHead = nullptr; if (!compIsForInlining() && opts.IsOSR()) { // For root method OSR we may branch back to the actual method entry, // which is not fgFirstBB, and which we will need to import. assert(fgEntryBB != nullptr); loopHead = fgEntryBB; } else { // For normal jitting we may branch back to the firstBB; this // should already be imported. loopHead = fgFirstBB; } JITDUMP("\nTail recursive call [%06u] in the method. Mark " FMT_BB " to " FMT_BB " as having a backward branch.\n", dspTreeID(call), loopHead->bbNum, compCurBB->bbNum); fgMarkBackwardJump(loopHead, compCurBB); } // We only do these OSR checks in the root method because: // * If we fail to import the root method entry when importing the root method, we can't go back // and import it during inlining. So instead of checking jsut for recursive tail calls we also // have to check for anything that might introduce a recursive tail call. // * We only instrument root method blocks in OSR methods, // if (opts.IsOSR() && !compIsForInlining()) { // If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile // instrumentation. // if (compCurBB->bbJumpKind != BBJ_RETURN) { BasicBlock* const successor = compCurBB->GetUniqueSucc(); assert(successor->bbJumpKind == BBJ_RETURN); successor->bbFlags |= BBF_TAILCALL_SUCCESSOR; optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR; } // If this call might eventually turn into a loop back to method entry, make sure we // import the method entry. // assert(call->IsCall()); GenTreeCall* const actualCall = call->AsCall(); const bool mustImportEntryBlock = gtIsRecursiveCall(methHnd) || actualCall->IsInlineCandidate() || actualCall->IsGuardedDevirtualizationCandidate(); // Only schedule importation if we're not currently importing. // if (mustImportEntryBlock && (compCurBB != fgEntryBB)) { JITDUMP("\nOSR: inlineable or recursive tail call [%06u] in the method, so scheduling " FMT_BB " for importation\n", dspTreeID(call), fgEntryBB->bbNum); impImportBlockPending(fgEntryBB); } } } if ((sig->flags & CORINFO_SIGFLAG_FAT_CALL) != 0) { assert(opcode == CEE_CALLI || callInfo->kind == CORINFO_CALL_CODE_POINTER); addFatPointerCandidate(call->AsCall()); } DONE_CALL: // Push or append the result of the call if (callRetTyp == TYP_VOID) { if (opcode == CEE_NEWOBJ) { // we actually did push something, so don't spill the thing we just pushed. 
assert(verCurrentState.esStackDepth > 0); impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI); } else { impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } } else { impSpillSpecialSideEff(); if (clsFlags & CORINFO_FLG_ARRAY) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass); tiRetVal.NormaliseForStack(); // The CEE_READONLY prefix modifies the verification semantics of an Address // operation on an array type. if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall && tiRetVal.IsByRef()) { tiRetVal.SetIsReadonlyByRef(); } if (call->IsCall()) { // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call) GenTreeCall* origCall = call->AsCall(); const bool isFatPointerCandidate = origCall->IsFatPointerCandidate(); const bool isInlineCandidate = origCall->IsInlineCandidate(); const bool isGuardedDevirtualizationCandidate = origCall->IsGuardedDevirtualizationCandidate(); if (varTypeIsStruct(callRetTyp)) { // Need to treat all "split tree" cases here, not just inline candidates call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass); } // TODO: consider handling fatcalli cases this way too...? if (isInlineCandidate || isGuardedDevirtualizationCandidate) { // We should not have made any adjustments in impFixupCallStructReturn // as we defer those until we know the fate of the call. assert(call == origCall); assert(opts.OptEnabled(CLFLG_INLINING)); assert(!isFatPointerCandidate); // We should not try to inline calli. // Make the call its own tree (spill the stack if needed). // Do not consume the debug info here. This is particularly // important if we give up on the inline, in which case the // call will typically end up in the statement that contains // the GT_RET_EXPR that we leave on the stack. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false); // TODO: Still using the widened type. GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags); // Link the retExpr to the call so if necessary we can manipulate it later. origCall->gtInlineCandidateInfo->retExpr = retExpr; // Propagate retExpr as the placeholder for the call. call = retExpr; } else { // If the call is virtual, and has a generics context, and is not going to have a class probe, // record the context for possible use during late devirt. // // If we ever want to devirt at Tier0, and/or see issues where OSR methods under PGO lose // important devirtualizations, we'll want to allow both a class probe and a captured context. // if (origCall->IsVirtual() && (origCall->gtCallType != CT_INDIRECT) && (exactContextHnd != nullptr) && (origCall->gtClassProfileCandidateInfo == nullptr)) { JITDUMP("\nSaving context %p for call [%06u]\n", exactContextHnd, dspTreeID(origCall)); origCall->gtCallMoreFlags |= GTF_CALL_M_LATE_DEVIRT; LateDevirtualizationInfo* const info = new (this, CMK_Inlining) LateDevirtualizationInfo; info->exactContextHnd = exactContextHnd; origCall->gtLateDevirtualizationInfo = info; } if (isFatPointerCandidate) { // fatPointer candidates should be in statements of the form call() or var = call(). // Such form allows to find statements with fat calls without walking through whole trees // and removes problems with cutting trees. 
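                    // Illustration (shape only, not exact IR): the fat call stays at the root of its
                    // statement, e.g.
                    //    STMT: CALL ind (fptr, args)                   -- "call()"
                    //    STMT: ASG(LCL_VAR tmp, CALL ind (fptr, args)) -- "var = call()"
                    // which lets the later fat-pointer transformation split the statement in place.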
assert(!bIntrinsicImported); assert(IsTargetAbi(CORINFO_CORERT_ABI)); if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn. { unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli")); LclVarDsc* varDsc = lvaGetDesc(calliSlot); varDsc->lvVerTypeInfo = tiRetVal; impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE); // impAssignTempGen can change src arg list and return type for call that returns struct. var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); } } // For non-candidates we must also spill, since we // might have locals live on the eval stack that this // call can modify. // // Suppress this for certain well-known call targets // that we know won't modify locals, eg calls that are // recognized in gtCanOptimizeTypeEquality. Otherwise // we may break key fragile pattern matches later on. bool spillStack = true; if (call->IsCall()) { GenTreeCall* callNode = call->AsCall(); if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) || gtIsTypeHandleToRuntimeTypeHandleHelper(callNode))) { spillStack = false; } else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0) { spillStack = false; } } if (spillStack) { impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call")); } } } if (!bIntrinsicImported) { //------------------------------------------------------------------------- // /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning. However, we need to normalize small type values returned by unmanaged functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here if we use the shorter inlined pinvoke stub. */ if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT)) { call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp); } } impPushOnStack(call, tiRetVal); } // VSD functions get a new call target each time we getCallInfo, so clear the cache. // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out. 
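    // (Note: the invalidation below is currently commented out and kept only for reference.)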
// if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB)) // callInfoCache.uncacheCallInfo(); return callRetTyp; } #ifdef _PREFAST_ #pragma warning(pop) #endif bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv) { CorInfoType corType = methInfo->args.retType; if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY)) { // We have some kind of STRUCT being returned structPassingKind howToReturnStruct = SPK_Unknown; var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, callConv, &howToReturnStruct); if (howToReturnStruct == SPK_ByReference) { return true; } } return false; } #ifdef DEBUG // var_types Compiler::impImportJitTestLabelMark(int numArgs) { TestLabelAndNum tlAndN; if (numArgs == 2) { tlAndN.m_num = 0; StackEntry se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); GenTree* val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue(); } else if (numArgs == 3) { StackEntry se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); GenTree* val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_num = val->AsIntConCommon()->IconValue(); se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue(); } else { assert(false); } StackEntry expSe = impPopStack(); GenTree* node = expSe.val; // There are a small number of special cases, where we actually put the annotation on a subnode. if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100) { // A loop hoist annotation with value >= 100 means that the expression should be a static field access, // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some // offset within the the static field block whose address is returned by the helper call. // The annotation is saying that this address calculation, but not the entire access, should be hoisted. assert(node->OperGet() == GT_IND); tlAndN.m_num -= 100; GetNodeTestData()->Set(node->AsOp()->gtOp1, tlAndN); GetNodeTestData()->Remove(node); } else { GetNodeTestData()->Set(node, tlAndN); } impPushOnStack(node, expSe.seTypeInfo); return node->TypeGet(); } #endif // DEBUG //----------------------------------------------------------------------------------- // impFixupCallStructReturn: For a call node that returns a struct do one of the following: // - set the flag to indicate struct return via retbuf arg; // - adjust the return type to a SIMD type if it is returned in 1 reg; // - spill call result into a temp if it is returned into 2 registers or more and not tail call or inline candidate. 
// // Arguments: // call - GT_CALL GenTree node // retClsHnd - Class handle of return type of the call // // Return Value: // Returns new GenTree node after fixing struct return of call node // GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd) { if (!varTypeIsStruct(call)) { return call; } call->gtRetClsHnd = retClsHnd; #if FEATURE_MULTIREG_RET call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv()); const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc(); const unsigned retRegCount = retTypeDesc->GetReturnRegCount(); #else // !FEATURE_MULTIREG_RET const unsigned retRegCount = 1; #endif // !FEATURE_MULTIREG_RET structPassingKind howToReturnStruct; var_types returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); if (howToReturnStruct == SPK_ByReference) { assert(returnType == TYP_UNKNOWN); call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG; return call; } // Recognize SIMD types as we do for LCL_VARs, // note it could be not the ABI specific type, for example, on x64 we can set 'TYP_SIMD8` // for `System.Numerics.Vector2` here but lower will change it to long as ABI dictates. var_types simdReturnType = impNormStructType(call->gtRetClsHnd); if (simdReturnType != call->TypeGet()) { assert(varTypeIsSIMD(simdReturnType)); JITDUMP("changing the type of a call [%06u] from %s to %s\n", dspTreeID(call), varTypeName(call->TypeGet()), varTypeName(simdReturnType)); call->ChangeType(simdReturnType); } if (retRegCount == 1) { return call; } #if FEATURE_MULTIREG_RET assert(varTypeIsStruct(call)); // It could be a SIMD returned in several regs. assert(returnType == TYP_STRUCT); assert((howToReturnStruct == SPK_ByValueAsHfa) || (howToReturnStruct == SPK_ByValue)); #ifdef UNIX_AMD64_ABI // must be a struct returned in two registers assert(retRegCount == 2); #else // not UNIX_AMD64_ABI assert(retRegCount >= 2); #endif // not UNIX_AMD64_ABI if (!call->CanTailCall() && !call->IsInlineCandidate()) { // Force a call returning multi-reg struct to be always of the IR form // tmp = call // // No need to assign a multi-reg struct to a local var if: // - It is a tail call or // - The call is marked for in-lining later return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv())); } return call; #endif // FEATURE_MULTIREG_RET } /***************************************************************************** For struct return values, re-type the operand in the case where the ABI does not use a struct return buffer */ //------------------------------------------------------------------------ // impFixupStructReturnType: For struct return values it sets appropriate flags in MULTIREG returns case; // in non-multiref case it handles two special helpers: `CORINFO_HELP_GETFIELDSTRUCT`, `CORINFO_HELP_UNBOX_NULLABLE`. // // Arguments: // op - the return value; // retClsHnd - the struct handle; // unmgdCallConv - the calling convention of the function that returns this struct. // // Return Value: // the result tree that does the return. 
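// Notes:
//    A rough sketch of the multireg path (see the body for the exact conditions): GT_RETURN(LCL_VAR)
//    and GT_RETURN(CALL) operands are returned as-is; any other struct-typed operand is first stored
//    to a temp so that the return always takes one of those two shapes.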
// GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv) { assert(varTypeIsStruct(info.compRetType)); assert(info.compRetBuffArg == BAD_VAR_NUM); JITDUMP("\nimpFixupStructReturnType: retyping\n"); DISPTREE(op); #if defined(TARGET_XARCH) #if FEATURE_MULTIREG_RET // No VarArgs for CoreCLR on x64 Unix UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs)); // Is method returning a multi-reg struct? if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { // In case of multi-reg struct return, we force IR to be one of the following: // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp). if (op->gtOper == GT_LCL_VAR) { // Note that this is a multi-reg return. unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { return op; } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #else assert(info.compRetNativeType != TYP_STRUCT); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_X86) #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM) if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); // Make sure this struct type stays as struct so that we can return it as an HFA lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64) // Is method returning a multi-reg struct? if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); if (!lvaIsImplicitByRefLocal(lclNum)) { // Make sure this struct type is not struct promoted lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #endif // FEATURE_MULTIREG_RET && TARGET_ARM64 if (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this)) { // Don't retype `struct` as a primitive type in `ret` instruction. return op; } // This must be one of those 'special' helpers that don't // really have a return buffer, but instead use it as a way // to keep the trees cleaner with fewer address-taken temps. 
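    // (For example, CORINFO_HELP_UNBOX_NULLABLE writes the unboxed value through its destination
    // pointer argument, which the JIT models as a pseudo return buffer.)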
    //
    // Well now we have to materialize the return buffer as
    // an address-taken temp. Then we can return the temp.
    //
    // NOTE: this code assumes that since the call directly
    // feeds the return, then the call must be returning the
    // same structure/class/type.
    //
    unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));

    // No need to spill anything as we're about to return.
    impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);

    op = gtNewLclvNode(tmpNum, info.compRetType);
    JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n");
    DISPTREE(op);
    return op;
}

/*****************************************************************************
   CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
   finally-protected try. We find the finally blocks protecting the current
   offset (in order) by walking over the complete exception table and
   finding enclosing clauses. This assumes that the table is sorted.
   This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.

   If we are leaving a catch handler, we need to attach the
   CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.

   After this function, the BBJ_LEAVE block has been converted to a different type.
 */

#if !defined(FEATURE_EH_FUNCLETS)

void Compiler::impImportLeave(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nBefore import CEE_LEAVE:\n");
        fgDispBasicBlocks();
        fgDispHandlerTab();
    }
#endif // DEBUG

    bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
    unsigned    blkAddr         = block->bbCodeOffs;
    BasicBlock* leaveTarget     = block->bbJumpDest;
    unsigned    jmpAddr         = leaveTarget->bbCodeOffs;

    // LEAVE clears the stack: spill side effects and set the stack depth to 0

    impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
    verCurrentState.esStackDepth = 0;

    assert(block->bbJumpKind == BBJ_LEAVE);
    assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary

    BasicBlock* step         = DUMMY_INIT(NULL);
    unsigned    encFinallies = 0; // Number of enclosing finallies.
    GenTree*    endCatches   = NULL;
    Statement*  endLFinStmt  = NULL; // The statement tree to indicate the end of locally-invoked finally.

    unsigned  XTnum;
    EHblkDsc* HBtab;

    for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
    {
        // Grab the handler offsets

        IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
        IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
        IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
        IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();

        /* Is this a catch-handler we are CEE_LEAVEing out of?
         * If so, we need to call CORINFO_HELP_ENDCATCH.
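         * (CORINFO_HELP_ENDCATCH notifies the runtime that the catch handler is being exited;
         * the calls are accumulated in 'endCatches' and appended before any finally is invoked.)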
*/ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) BADCODE("leave out of fault/finally block"); // Create the call to CORINFO_HELP_ENDCATCH GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID); // Make a list of all the currently pending endCatches if (endCatches) endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch); else endCatches = endCatch; #ifdef DEBUG if (verbose) { printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to " "CORINFO_HELP_ENDCATCH\n", block->bbNum, XTnum); } #endif } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* This is a finally-protected try we are jumping out of */ /* If there are any pending endCatches, and we have already jumped out of a finally-protected try, then the endCatches have to be put in a block in an outer try for async exceptions to work correctly. Else, just use append to the original block */ BasicBlock* callBlock; assert(!encFinallies == !endLFinStmt); // if we have finallies, we better have an endLFin tree, and vice-versa if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY " "block %s\n", callBlock->dspToString()); } #endif } else { assert(step != DUMMY_INIT(NULL)); /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); assert(step->bbJumpKind == BBJ_ALWAYS); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n", callBlock->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } // note that this sets BBF_IMPORTED on the block impEndTreeList(callBlock, endLFinStmt, lastStmt); } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n", step->dspToString()); } #endif unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel; assert(finallyNesting <= compHndBBtabCount); callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. 
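            // (GT_END_LFIN records the nesting level of the locally-invoked finally; in this
            // non-funclet EH model codegen uses it to pop the corresponding shadow-SP slot.)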
GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting); endLFinStmt = gtNewStmt(endLFin); endCatches = NULL; encFinallies++; invalidatePreds = true; } } /* Append any remaining endCatches, if any */ assert(!encFinallies == !endLFinStmt); if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS " "block %s\n", block->dspToString()); } #endif } else { // If leaveTarget is the start of another try block, we want to make sure that // we do not insert finalStep into that try block. Hence, we find the enclosing // try block. unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget); // Insert a new BB either in the try region indicated by tryIndex or // the handler region indicated by leaveTarget->bbHndIndex, // depending on which is the inner region. BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step); finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS; step->bbJumpDest = finalStep; /* The new block will inherit this block's weight */ finalStep->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies, finalStep->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } impEndTreeList(finalStep, endLFinStmt, lastStmt); finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE // Queue up the jump target for importing impImportBlockPending(leaveTarget); invalidatePreds = true; } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #else // FEATURE_EH_FUNCLETS void Compiler::impImportLeave(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nBefore import CEE_LEAVE in " FMT_BB " (targetting " FMT_BB "):\n", block->bbNum, block->bbJumpDest->bbNum); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created) unsigned blkAddr = block->bbCodeOffs; BasicBlock* leaveTarget = block->bbJumpDest; unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary BasicBlock* step = nullptr; enum StepType { // No step type; step == NULL. ST_None, // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair? // That is, is step->bbJumpDest where a finally will return to? ST_FinallyReturn, // The step block is a catch return. ST_Catch, // The step block is in a "try", created as the target for a finally return or the target for a catch return. 
ST_Try }; StepType stepType = ST_None; unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? */ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) { BADCODE("leave out of fault/finally block"); } /* We are jumping out of a catch */ if (step == nullptr) { step = block; step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET stepType = ST_Catch; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB " to BBJ_EHCATCHRET " "block\n", XTnum, step->bbNum); } #endif } else { BasicBlock* exitBlock; /* Create a new catch exit block in the catch region for the existing step block to jump to in this * scope */ exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step); assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch // exit) returns to this block step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ exitBlock->inheritWeight(block); exitBlock->bbFlags |= BBF_IMPORTED; /* This exit block is the new step */ step = exitBlock; stepType = ST_Catch; invalidatePreds = true; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n", XTnum, exitBlock->bbNum); } #endif } } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* We are jumping out of a finally-protected try */ BasicBlock* callBlock; if (step == nullptr) { #if FEATURE_EH_CALLFINALLY_THUNKS // Put the call to the finally in the enclosing region. unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. 
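                // Resulting shape (illustration):
                //   BBJ_ALWAYS (old BBJ_LEAVE, still in the 'try') -> BBJ_CALLFINALLY (in the enclosing region)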
block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = callBlock; block->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n", XTnum, block->bbNum, callBlock->bbNum); } #endif #else // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_CALLFINALLY block\n", XTnum, callBlock->bbNum); } #endif #endif // !FEATURE_EH_CALLFINALLY_THUNKS } else { // Calling the finally block. We already have a step block that is either the call-to-finally from a // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by // a 'finally'), or the step block is the return from a catch. // // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will // automatically re-raise the exception, using the return address of the catch (that is, the target // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64, // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly // within the 'try' region protected by the finally, since we generate code in such a way that execution // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on // stack walks.) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS if (step->bbJumpKind == BBJ_EHCATCHRET) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = step2; step->bbJumpDest->bbRefs++; step2->inheritWeight(block); step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is " "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n", XTnum, step->bbNum, step2->bbNum); } #endif step = step2; assert(stepType == ST_Catch); // Leave it as catch type for now. } #endif // FEATURE_EH_CALLFINALLY_THUNKS #if FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 
0 : HBtab->ebdEnclosingHndIndex + 1; #else // !FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = XTnum + 1; unsigned callFinallyHndIndex = 0; // don't care #endif // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY " "block " FMT_BB "\n", XTnum, callBlock->bbNum); } #endif } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); stepType = ST_FinallyReturn; /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) " "block " FMT_BB "\n", XTnum, step->bbNum); } #endif callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. invalidatePreds = true; } else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { // We are jumping out of a catch-protected try. // // If we are returning from a call to a finally, then we must have a step block within a try // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the // finally raises an exception), the VM will find this step block, notice that it is in a protected region, // and invoke the appropriate catch. // // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception), // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM, // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target // address of the catch return as the new exception address. That is, the re-raised exception appears to // occur at the catch return address. If this exception return address skips an enclosing try/catch that // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should. // For example: // // try { // try { // // something here raises ThreadAbortException // LEAVE LABEL_1; // no need to stop at LABEL_2 // } catch (Exception) { // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode. // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only // // need to do this transformation if the current EH block is a try/catch that catches // // ThreadAbortException (or one of its parents), however we might not be able to find that // // information, so currently we do it for all catch types. 
// LEAVE LABEL_1; // Convert this to LEAVE LABEL2; // } // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code // } catch (ThreadAbortException) { // } // LABEL_1: // // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# // compiler. if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch)) { BasicBlock* catchStep; assert(step); if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); } else { assert(stepType == ST_Catch); assert(step->bbJumpKind == BBJ_EHCATCHRET); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = catchStep; step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ catchStep->inheritWeight(block); catchStep->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { if (stepType == ST_FinallyReturn) { printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } else { assert(stepType == ST_Catch); printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } } #endif // DEBUG /* This block is the new step */ step = catchStep; stepType = ST_Try; invalidatePreds = true; } } } if (step == nullptr) { block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE " "block " FMT_BB " to BBJ_ALWAYS\n", block->bbNum); } #endif } else { step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) #ifdef DEBUG if (verbose) { printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum); } #endif // Queue up the jump target for importing impImportBlockPending(leaveTarget); } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #endif // FEATURE_EH_FUNCLETS /*****************************************************************************/ // This is called when reimporting a leave block. It resets the JumpKind, // JumpDest, and bbNext to the original values void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) { #if defined(FEATURE_EH_FUNCLETS) // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1) // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0, // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we // create another BBJ_ALWAYS (call it B2). 
In this process B1 gets orphaned and any blocks to which B1 is the // only predecessor are also considered orphans and attempted to be deleted. // // try { // .... // try // { // .... // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1 // } finally { } // } finally { } // OUTSIDE: // // In the above nested try-finally example, we create a step block (call it Bstep) which in branches to a block // where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block. // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To // work around this we will duplicate B0 (call it B0Dup) before reseting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. if (block->bbJumpKind == BBJ_CALLFINALLY) { BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind); dupBlock->bbFlags = block->bbFlags; dupBlock->bbJumpDest = block->bbJumpDest; dupBlock->copyEHRegion(block); dupBlock->bbCatchTyp = block->bbCatchTyp; // Mark this block as // a) not referenced by any other block to make sure that it gets deleted // b) weight zero // c) prevent from being imported // d) as internal // e) as rarely run dupBlock->bbRefs = 0; dupBlock->bbWeight = BB_ZERO_WEIGHT; dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY; // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS // will be next to each other. fgInsertBBafter(block, dupBlock); #ifdef DEBUG if (verbose) { printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum); } #endif } #endif // FEATURE_EH_FUNCLETS block->bbJumpKind = BBJ_LEAVE; fgInitBBLookup(); block->bbJumpDest = fgLookupBB(jmpAddr); // We will leave the BBJ_ALWAYS block we introduced. When it's reimported // the BBJ_ALWAYS block will be unreachable, and will be removed after. The // reason we don't want to remove the block at this point is that if we call // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be // added and the linked list length will be different than fgBBcount. } /*****************************************************************************/ // Get the first non-prefix opcode. Used for verification of valid combinations // of prefixes and actual opcodes. OPCODE Compiler::impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp) { while (codeAddr < codeEndp) { OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); if (opcode == CEE_PREFIX1) { if (codeAddr >= codeEndp) { break; } opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); codeAddr += sizeof(__int8); } switch (opcode) { case CEE_UNALIGNED: case CEE_VOLATILE: case CEE_TAILCALL: case CEE_CONSTRAINED: case CEE_READONLY: break; default: return opcode; } codeAddr += opcodeSizes[opcode]; } return CEE_ILLEGAL; } /*****************************************************************************/ // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes void Compiler::impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix) { OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!( // Opcode of all ldind and stdind happen to be in continuous, except stind.i. 
((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) || (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) || (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) || // volatile. prefix is allowed with the ldsfld and stsfld (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD))))) { BADCODE("Invalid opcode for unaligned. or volatile. prefix"); } } /*****************************************************************************/ #ifdef DEBUG #undef RETURN // undef contracts RETURN macro enum controlFlow_t { NEXT, CALL, RETURN, THROW, BRANCH, COND_BRANCH, BREAK, PHI, META, }; const static controlFlow_t controlFlow[] = { #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow, #include "opcode.def" #undef OPDEF }; #endif // DEBUG /***************************************************************************** * Determine the result type of an arithemetic operation * On 64-bit inserts upcasts when native int is mixed with int32 */ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2) { var_types type = TYP_UNDEF; GenTree* op1 = *pOp1; GenTree* op2 = *pOp2; // Arithemetic operations are generally only allowed with // primitive types, but certain operations are allowed // with byrefs if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF)) { if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF)) { // byref1-byref2 => gives a native int type = TYP_I_IMPL; } else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF)) { // [native] int - byref => gives a native int // // The reason is that it is possible, in managed C++, // to have a tree like this: // // - // / \. // / \. // / \. // / \. // const(h) int addr byref // // <BUGNUM> VSW 318822 </BUGNUM> // // So here we decide to make the resulting type to be a native int. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_I_IMPL; } else { // byref - [native] int => gives a byref assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet())); #ifdef TARGET_64BIT if ((genActualType(op2->TypeGet()) != TYP_I_IMPL)) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } } else if ((oper == GT_ADD) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF)) { // byref + [native] int => gives a byref // (or) // [native] int + byref => gives a byref // only one can be a byref : byref op byref not allowed assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet())); #ifdef TARGET_64BIT if (genActualType(op2->TypeGet()) == TYP_BYREF) { if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? 
TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } #ifdef TARGET_64BIT else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long // we get this because in the IL the long isn't Int64, it's just IntPtr if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } type = TYP_I_IMPL; } #else // 32-bit TARGET else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long type = TYP_LONG; } #endif // TARGET_64BIT else { // int + int => gives an int assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); type = genActualType(op1->gtType); // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT. // Otherwise, turn floats into doubles if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT)) { assert(genActualType(op2->gtType) == TYP_DOUBLE); type = TYP_DOUBLE; } } assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT); return type; } //------------------------------------------------------------------------ // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting // // Arguments: // op1 - value to cast // pResolvedToken - resolved token for type to cast to // isCastClass - true if this is a castclass, false if isinst // // Return Value: // tree representing optimized cast, or null if no optimization possible GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass) { assert(op1->TypeGet() == TYP_REF); // Don't optimize for minopts or debug codegen. if (opts.OptimizationDisabled()) { return nullptr; } // See what we know about the type of the object being cast. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull); if (fromClass != nullptr) { CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass; JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst", isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass), info.compCompHnd->getClassName(toClass)); // Perhaps we know if the cast will succeed or fail. TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass); if (castResult == TypeCompareState::Must) { // Cast will succeed, result is simply op1. JITDUMP("Cast will succeed, optimizing to simply return input\n"); return op1; } else if (castResult == TypeCompareState::MustNot) { // See if we can sharpen exactness by looking for final classes if (!isExact) { isExact = impIsClassExact(fromClass); } // Cast to exact type will fail. 
Handle case where we have // an exact type (that is, fromClass is not a subtype) // and we're not going to throw on failure. if (isExact && !isCastClass) { JITDUMP("Cast will fail, optimizing to return null\n"); GenTree* result = gtNewIconNode(0, TYP_REF); // If the cast was fed by a box, we can remove that too. if (op1->IsBoxedValue()) { JITDUMP("Also removing upstream box\n"); gtTryRemoveBoxUpstreamEffects(op1); } return result; } else if (isExact) { JITDUMP("Not optimizing failing castclass (yet)\n"); } else { JITDUMP("Can't optimize since fromClass is inexact\n"); } } else { JITDUMP("Result of cast unknown, must generate runtime test\n"); } } else { JITDUMP("\nCan't optimize since fromClass is unknown\n"); } return nullptr; } //------------------------------------------------------------------------ // impCastClassOrIsInstToTree: build and import castclass/isinst // // Arguments: // op1 - value to cast // op2 - type handle for type to cast to // pResolvedToken - resolved token from the cast operation // isCastClass - true if this is castclass, false means isinst // // Return Value: // Tree representing the cast // // Notes: // May expand into a series of runtime checks or a helper call. GenTree* Compiler::impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset) { assert(op1->TypeGet() == TYP_REF); // Optimistically assume the jit should expand this as an inline test bool shouldExpandInline = true; // Profitability check. // // Don't bother with inline expansion when jit is trying to // generate code quickly, or the cast is in code that won't run very // often, or the method already is pretty big. if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { // not worth the code expansion if jitting fast or in a rarely run block shouldExpandInline = false; } else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals()) { // not worth creating an untracked local variable shouldExpandInline = false; } else if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && (JitConfig.JitCastProfiling() == 1)) { // Optimizations are enabled but we're still instrumenting (including casts) if (isCastClass && !impIsClassExact(pResolvedToken->hClass)) { // Usually, we make a speculative assumption that it makes sense to expand castclass // even for non-sealed classes, but let's rely on PGO in this specific case shouldExpandInline = false; } } // Pessimistically assume the jit cannot expand this as an inline test bool canExpandInline = false; const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass); // Legality check. // // Not all classclass/isinst operations can be inline expanded. // Check legality only if an inline expansion is desirable. if (shouldExpandInline) { if (isCastClass) { // Jit can only inline expand the normal CHKCASTCLASS helper. canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS); } else { if (helper == CORINFO_HELP_ISINSTANCEOFCLASS) { // If the class is exact, the jit can expand the IsInst check inline. canExpandInline = impIsClassExact(pResolvedToken->hClass); } } } const bool expandInline = canExpandInline && shouldExpandInline; if (!expandInline) { JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst", canExpandInline ? 
"want smaller code or faster jitting" : "inline expansion not legal"); // If we CSE this class handle we prevent assertionProp from making SubType assertions // so instead we force the CSE logic to not consider CSE-ing this class handle. // op2->gtFlags |= GTF_DONT_CSE; GenTreeCall* call = gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, op1)); if (impIsCastHelperEligibleForClassProbe(call) && !impIsClassExact(pResolvedToken->hClass)) { ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return call; } JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst"); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2")); GenTree* temp; GenTree* condMT; // // expand the methodtable match: // // condMT ==> GT_NE // / \. // GT_IND op2 (typically CNS_INT) // | // op1Copy // // This can replace op1 with a GT_COMMA that evaluates op1 into a local // op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); // // op1 is now known to be a non-complex tree // thus we can use gtClone(op1) from now on // GenTree* op2Var = op2; if (isCastClass) { op2Var = fgInsertCommaFormTemp(&op2); lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true; } temp = gtNewMethodTableLookup(temp); condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2); GenTree* condNull; // // expand the null check: // // condNull ==> GT_EQ // / \. // op1Copy CNS_INT // null // condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF)); // // expand the true and false trees for the condMT // GenTree* condFalse = gtClone(op1); GenTree* condTrue; if (isCastClass) { // // use the special helper that skips the cases checked by our inlined cast // const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL; condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewCallArgs(op2Var, gtClone(op1))); } else { condTrue = gtNewIconNode(0, TYP_REF); } GenTree* qmarkMT; // // Generate first QMARK - COLON tree // // qmarkMT ==> GT_QMARK // / \. // condMT GT_COLON // / \. // condFalse condTrue // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse); qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp->AsColon()); if (isCastClass && impIsClassExact(pResolvedToken->hClass) && condTrue->OperIs(GT_CALL)) { // condTrue is used only for throwing InvalidCastException in case of casting to an exact class. condTrue->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; } GenTree* qmarkNull; // // Generate second QMARK - COLON tree // // qmarkNull ==> GT_QMARK // / \. // condNull GT_COLON // / \. // qmarkMT op1Copy // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT); qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp->AsColon()); qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF; // Make QMark node a top level node by spilling it. unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2")); impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE); // TODO-CQ: Is it possible op1 has a better type? // // See also gtGetHelperCallClassHandle where we make the same // determination for the helper call variants. 
LclVarDsc* lclDsc = lvaGetDesc(tmp); assert(lclDsc->lvSingleDef == 0); lclDsc->lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); lvaSetClass(tmp, pResolvedToken->hClass); return gtNewLclvNode(tmp, TYP_REF); } #ifndef DEBUG #define assertImp(cond) ((void)0) #else #define assertImp(cond) \ do \ { \ if (!(cond)) \ { \ const int cchAssertImpBuf = 600; \ char* assertImpBuf = (char*)_alloca(cchAssertImpBuf); \ _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \ "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \ impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \ op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \ assertAbort(assertImpBuf, __FILE__, __LINE__); \ } \ } while (0) #endif // DEBUG //------------------------------------------------------------------------ // impBlockIsInALoop: check if a block might be in a loop // // Arguments: // block - block to check // // Returns: // true if the block might be in a loop. // // Notes: // Conservatively correct; may return true for some blocks that are // not actually in loops. // bool Compiler::impBlockIsInALoop(BasicBlock* block) { return (compIsForInlining() && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0); } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif /***************************************************************************** * Import the instr for the given basic block */ void Compiler::impImportBlockCode(BasicBlock* block) { #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind) #ifdef DEBUG if (verbose) { printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName); } #endif unsigned nxtStmtIndex = impInitBlockLineInfo(); IL_OFFSET nxtStmtOffs; CorInfoHelpFunc helper; CorInfoIsAccessAllowedResult accessAllowedResult; CORINFO_HELPER_DESC calloutHelper; const BYTE* lastLoadToken = nullptr; /* Get the tree list started */ impBeginTreeList(); #ifdef FEATURE_ON_STACK_REPLACEMENT bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0); #ifdef DEBUG // Optionally suppress patchpoints by method hash // static ConfigMethodRange JitEnablePatchpointRange; JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); const bool inRange = JitEnablePatchpointRange.Contains(hash); enablePatchpoints &= inRange; #endif // DEBUG if (enablePatchpoints) { // We don't inline at Tier0, if we do, we may need rethink our approach. // Could probably support inlines that don't introduce flow. // assert(!compIsForInlining()); // OSR is not yet supported for methods with explicit tail calls. // // But we also do not have to switch these methods to be optimized as we should be // able to avoid getting trapped in Tier0 code by normal call counting. // So instead, just suppress adding patchpoints. // if (!compTailPrefixSeen) { // The normaly policy is only to add patchpoints to the targets of lexically // backwards branches. // if (compHasBackwardJump) { assert(compCanHavePatchpoints()); // Is the start of this block a suitable patchpoint? // if (((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) && (verCurrentState.esStackDepth == 0)) { // We should have noted this earlier and bailed out of OSR. 
// assert(!block->hasHndIndex()); block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } } else { // Should not see backward branch targets w/o backwards branches assert((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == 0); } } #ifdef DEBUG // As a stress test, we can place patchpoints at the start of any block // that is a stack empty point and is not within a handler. // // Todo: enable for mid-block stack empty points too. // const int offsetOSR = JitConfig.JitOffsetOnStackReplacement(); const int randomOSR = JitConfig.JitRandomOnStackReplacement(); const bool tryOffsetOSR = offsetOSR >= 0; const bool tryRandomOSR = randomOSR > 0; if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0)) { // Block start can have a patchpoint. See if we should add one. // bool addPatchpoint = false; // Specific offset? // if (tryOffsetOSR) { if (impCurOpcOffs == (unsigned)offsetOSR) { addPatchpoint = true; } } // Random? // else { // Reuse the random inliner's random state. // Note m_inlineStrategy is always created, even if we're not inlining. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(randomOSR); const int randomValue = (int)random->Next(100); addPatchpoint = (randomValue < randomOSR); } if (addPatchpoint) { block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } JITDUMP("\n** %s patchpoint%s added to " FMT_BB " (il offset %u)\n", tryOffsetOSR ? "offset" : "random", addPatchpoint ? "" : " not", block->bbNum, impCurOpcOffs); } #endif // DEBUG } // Mark stack-empty rare blocks to be considered for partial compilation. // // Ideally these are conditionally executed blocks -- if the method is going // to unconditionally throw, there's not as much to be gained by deferring jitting. // For now, we just screen out the entry bb. // // In general we might want track all the IL stack empty points so we can // propagate rareness back through flow and place the partial compilation patchpoints "earlier" // so there are fewer overall. // // Note unlike OSR, it's ok to forgo these. // // Todo: stress mode... // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) && compCanHavePatchpoints() && !compTailPrefixSeen) { // Is this block a good place for partial compilation? // if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) && ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex()) { JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum); block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT; setMethodHasPartialCompilationPatchpoint(); // Change block to BBJ_THROW so we won't trigger importation of successors. // block->bbJumpKind = BBJ_THROW; // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. 
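// (For example -- illustrative -- the deferred IL could contain `typeof(T)` or
//  `default(T)`, which needs the generic context at run time; since that IL is
//  never imported we cannot prove the context is unused, so we conservatively
//  keep it alive.)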
// if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE)) { lvaGenericsContextInUse = true; } return; } } #endif // FEATURE_ON_STACK_REPLACEMENT /* Walk the opcodes that comprise the basic block */ const BYTE* codeAddr = info.compCode + block->bbCodeOffs; const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd; IL_OFFSET opcodeOffs = block->bbCodeOffs; IL_OFFSET lastSpillOffs = opcodeOffs; signed jmpDist; /* remember the start of the delegate creation sequence (used for verification) */ const BYTE* delegateCreateStart = nullptr; int prefixFlags = 0; bool explicitTailCall, constraintCall, readonlyCall; typeInfo tiRetVal; unsigned numArgs = info.compArgsCount; /* Now process all the opcodes in the block */ var_types callTyp = TYP_COUNT; OPCODE prevOpcode = CEE_ILLEGAL; if (block->bbCatchTyp) { if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { impCurStmtOffsSet(block->bbCodeOffs); } // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block // to a temp. This is a trade off for code simplicity impSpillSpecialSideEff(); } while (codeAddr < codeEndp) { #ifdef FEATURE_READYTORUN bool usingReadyToRunHelper = false; #endif CORINFO_RESOLVED_TOKEN resolvedToken; CORINFO_RESOLVED_TOKEN constrainedResolvedToken; CORINFO_CALL_INFO callInfo; CORINFO_FIELD_INFO fieldInfo; tiRetVal = typeInfo(); // Default type info //--------------------------------------------------------------------- /* We need to restrict the max tree depth as many of the Compiler functions are recursive. We do this by spilling the stack */ if (verCurrentState.esStackDepth) { /* Has it been a while since we last saw a non-empty stack (which guarantees that the tree depth isnt accumulating. */ if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode)) { impSpillStackEnsure(); lastSpillOffs = opcodeOffs; } } else { lastSpillOffs = opcodeOffs; impBoxTempInUse = false; // nothing on the stack, box temp OK to use again } /* Compute the current instr offset */ opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); #ifndef DEBUG if (opts.compDbgInfo) #endif { nxtStmtOffs = (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET; /* Have we reached the next stmt boundary ? */ if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs) { assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]); if (verCurrentState.esStackDepth != 0 && opts.compDbgCode) { /* We need to provide accurate IP-mapping at this point. So spill anything on the stack so that it will form gtStmts with the correct stmt offset noted */ impSpillStackEnsure(true); } // Have we reported debug info for any tree? if (impCurStmtDI.IsValid() && opts.compDbgCode) { GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); assert(!impCurStmtDI.IsValid()); } if (!impCurStmtDI.IsValid()) { /* Make sure that nxtStmtIndex is in sync with opcodeOffs. If opcodeOffs has gone past nxtStmtIndex, catch up */ while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount && info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs) { nxtStmtIndex++; } /* Go to the new stmt */ impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]); /* Update the stmt boundary index */ nxtStmtIndex++; assert(nxtStmtIndex <= info.compStmtOffsetsCount); /* Are there any more line# entries after this one? 
*/ if (nxtStmtIndex < info.compStmtOffsetsCount) { /* Remember where the next line# starts */ nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex]; } else { /* No more line# entries */ nxtStmtOffs = BAD_IL_OFFSET; } } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) && (verCurrentState.esStackDepth == 0)) { /* At stack-empty locations, we have already added the tree to the stmt list with the last offset. We just need to update impCurStmtDI */ impCurStmtOffsSet(opcodeOffs); } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) && impOpcodeIsCallSiteBoundary(prevOpcode)) { /* Make sure we have a type cached */ assert(callTyp != TYP_COUNT); if (callTyp == TYP_VOID) { impCurStmtOffsSet(opcodeOffs); } else if (opts.compDbgCode) { impSpillStackEnsure(true); impCurStmtOffsSet(opcodeOffs); } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP)) { if (opts.compDbgCode) { impSpillStackEnsure(true); } impCurStmtOffsSet(opcodeOffs); } assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) || (impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs)); } CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL); var_types lclTyp, ovflType = TYP_UNKNOWN; GenTree* op1 = DUMMY_INIT(NULL); GenTree* op2 = DUMMY_INIT(NULL); GenTree* newObjThisPtr = DUMMY_INIT(NULL); bool uns = DUMMY_INIT(false); bool isLocal = false; /* Get the next opcode and the size of its parameters */ OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); #ifdef DEBUG impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs); #endif DECODE_OPCODE: // Return if any previous code has caused inline to fail. if (compDonotInline()) { return; } /* Get the size of additional parameters */ signed int sz = opcodeSizes[opcode]; #ifdef DEBUG clsHnd = NO_CLASS_HANDLE; lclTyp = TYP_COUNT; callTyp = TYP_COUNT; impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); impCurOpcName = opcodeNames[opcode]; if (verbose && (opcode != CEE_PREFIX1)) { printf("%s", impCurOpcName); } /* Use assertImp() to display the opcode */ op1 = op2 = nullptr; #endif /* See what kind of an opcode we have, then */ unsigned mflags = 0; unsigned clsFlags = 0; switch (opcode) { unsigned lclNum; var_types type; GenTree* op3; genTreeOps oper; unsigned size; int val; CORINFO_SIG_INFO sig; IL_OFFSET jmpAddr; bool ovfl, unordered, callNode; bool ldstruct; CORINFO_CLASS_HANDLE tokenType; union { int intVal; float fltVal; __int64 lngVal; double dblVal; } cval; case CEE_PREFIX1: opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; SPILL_APPEND: // We need to call impSpillLclRefs() for a struct type lclVar. // This is because there may be loads of that lclVar on the evaluation stack, and // we need to ensure that those loads are completed before we modify it. 
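// (Illustrative example: if the IL did
//      ldloc.0          // old value of struct V00 is now on the evaluation stack
//      ...
//      <store to V00>   // e.g. via stloc.0, or initobj on its address
//  the pending load of V00 must be spilled to a temp before this store is
//  appended, otherwise it would observe the updated value of V00.)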
if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1())) { GenTree* lhs = op1->gtGetOp1(); GenTreeLclVarCommon* lclVar = nullptr; if (lhs->gtOper == GT_LCL_VAR) { lclVar = lhs->AsLclVarCommon(); } else if (lhs->OperIsBlk()) { // Check if LHS address is within some struct local, to catch // cases where we're updating the struct by something other than a stfld GenTree* addr = lhs->AsBlk()->Addr(); // Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT)) lclVar = addr->IsLocalAddrExpr(); // Catches ADDR(FIELD(... ADDR(LCL_VAR))) if (lclVar == nullptr) { GenTree* lclTree = nullptr; if (impIsAddressInLocal(addr, &lclTree)) { lclVar = lclTree->AsLclVarCommon(); } } } if (lclVar != nullptr) { impSpillLclRefs(lclVar->GetLclNum()); } } /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); goto DONE_APPEND; APPEND: /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); goto DONE_APPEND; DONE_APPEND: #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif break; case CEE_LDNULL: impPushNullObjRefOnStack(); break; case CEE_LDC_I4_M1: case CEE_LDC_I4_0: case CEE_LDC_I4_1: case CEE_LDC_I4_2: case CEE_LDC_I4_3: case CEE_LDC_I4_4: case CEE_LDC_I4_5: case CEE_LDC_I4_6: case CEE_LDC_I4_7: case CEE_LDC_I4_8: cval.intVal = (opcode - CEE_LDC_I4_0); assert(-1 <= cval.intVal && cval.intVal <= 8); goto PUSH_I4CON; case CEE_LDC_I4_S: cval.intVal = getI1LittleEndian(codeAddr); goto PUSH_I4CON; case CEE_LDC_I4: cval.intVal = getI4LittleEndian(codeAddr); goto PUSH_I4CON; PUSH_I4CON: JITDUMP(" %d", cval.intVal); impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT)); break; case CEE_LDC_I8: cval.lngVal = getI8LittleEndian(codeAddr); JITDUMP(" 0x%016llx", cval.lngVal); impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG)); break; case CEE_LDC_R8: cval.dblVal = getR8LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE)); break; case CEE_LDC_R4: cval.dblVal = getR4LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal, TYP_FLOAT), typeInfo(TI_DOUBLE)); break; case CEE_LDSTR: val = getU4LittleEndian(codeAddr); JITDUMP(" %08X", val); impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal); break; case CEE_LDARG: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: lclNum = (opcode - CEE_LDARG_0); assert(lclNum >= 0 && lclNum < 4); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: lclNum = (opcode - CEE_LDLOC_0); assert(lclNum >= 0 && lclNum < 4); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_STARG: lclNum = getU2LittleEndian(codeAddr); goto STARG; case CEE_STARG_S: lclNum = getU1LittleEndian(codeAddr); STARG: JITDUMP(" %u", lclNum); if (compIsForInlining()) { op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); 
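// For an argument that the prescan saw being written (starg), impInlineFetchArg
// is expected to hand back the LCL_VAR of the temp created for that argument
// rather than the caller's original expression, which is what the assert below
// relies on.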
noway_assert(op1->gtOper == GT_LCL_VAR); lclNum = op1->AsLclVar()->GetLclNum(); goto VAR_ST_VALID; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } // We should have seen this arg write in the prescan assert(lvaTable[lclNum].lvHasILStoreOp); goto VAR_ST; case CEE_STLOC: lclNum = getU2LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_S: lclNum = getU1LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: isLocal = true; lclNum = (opcode - CEE_STLOC_0); assert(lclNum >= 0 && lclNum < 4); LOC_ST: if (compIsForInlining()) { lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp")); goto _PopValue; } lclNum += numArgs; VAR_ST: if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var) { BADCODE("Bad IL"); } VAR_ST_VALID: /* if it is a struct assignment, make certain we don't overflow the buffer */ assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd)); if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } _PopValue: /* Pop the value being assigned */ { StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; tiRetVal = se.seTypeInfo; } #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet())) { assert(op1->TypeGet() == TYP_STRUCT); op1->gtType = lclTyp; } #endif // FEATURE_SIMD op1 = impImplicitIorI4Cast(op1, lclTyp); #ifdef TARGET_64BIT // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT)) { op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT); } #endif // TARGET_64BIT // We had better assign it a value of the correct type assertImp( genActualType(lclTyp) == genActualType(op1->gtType) || (genActualType(lclTyp) == TYP_I_IMPL && op1->IsLocalAddrExpr() != nullptr) || (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) || (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) || (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) || ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF)); /* If op1 is "&var" then its type is the transient "*" and it can be used either as TYP_BYREF or TYP_I_IMPL */ if (op1->IsLocalAddrExpr() != nullptr) { assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF); /* When "&var" is created, we assume it is a byref. If it is being assigned to a TYP_I_IMPL var, change the type to prevent unnecessary GC info */ if (genActualType(lclTyp) == TYP_I_IMPL) { op1->gtType = TYP_I_IMPL; } } // If this is a local and the local is a ref type, see // if we can improve type information based on the // value being assigned. if (isLocal && (lclTyp == TYP_REF)) { // We should have seen a stloc in our IL prescan. assert(lvaTable[lclNum].lvHasILStoreOp); // Is there just one place this local is defined? const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef; // Conservative check that there is just one // definition that reaches this store. 
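// (Illustrative example: for C# like `object o = new Foo();`, where the local
//  has a single stloc in the method and the block is entered with an empty
//  stack, lvaUpdateClass below can refine the local's class to exactly Foo,
//  which can enable later devirtualization of calls made through it.)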
const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0); if (isSingleDefLocal && hasSingleReachingDef) { lvaUpdateClass(lclNum, op1, clsHnd); } } /* Filter out simple assignments to itself */ if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum()) { if (opts.compDbgCode) { op1 = gtNewNothingNode(); goto SPILL_APPEND; } else { break; } } /* Create the assignment node */ op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1)); /* If the local is aliased or pinned, we need to spill calls and indirections from the stack. */ if ((lvaTable[lclNum].IsAddressExposed() || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) && (verCurrentState.esStackDepth > 0)) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned")); } /* Spill any refs to the local from the stack */ impSpillLclRefs(lclNum); // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op2' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet()); } if (varTypeIsStruct(lclTyp)) { op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL); } else { op1 = gtNewAssignNode(op2, op1); } goto SPILL_APPEND; case CEE_LDLOCA: lclNum = getU2LittleEndian(codeAddr); goto LDLOCA; case CEE_LDLOCA_S: lclNum = getU1LittleEndian(codeAddr); LDLOCA: JITDUMP(" %u", lclNum); if (compIsForInlining()) { // Get the local type lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp")); assert(!lvaGetDesc(lclNum)->lvNormalizeOnLoad()); op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum)); goto _PUSH_ADRVAR; } lclNum += numArgs; assertImp(lclNum < info.compLocalsCount); goto ADRVAR; case CEE_LDARGA: lclNum = getU2LittleEndian(codeAddr); goto LDARGA; case CEE_LDARGA_S: lclNum = getU1LittleEndian(codeAddr); LDARGA: JITDUMP(" %u", lclNum); Verify(lclNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument, // followed by a ldfld to load the field. op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); if (op1->gtOper != GT_LCL_VAR) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR); return; } assert(op1->gtOper == GT_LCL_VAR); goto _PUSH_ADRVAR; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } goto ADRVAR; ADRVAR: op1 = impCreateLocalNode(lclNum DEBUGARG(opcodeOffs + sz + 1)); _PUSH_ADRVAR: assert(op1->gtOper == GT_LCL_VAR); /* Note that this is supposed to create the transient type "*" which may be used as a TYP_I_IMPL. However we catch places where it is used as a TYP_I_IMPL and change the node if needed. Thus we are pessimistic and may report byrefs in the GC info where it was not absolutely needed, but it is safer this way. 
*/ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); // &aliasedVar doesnt need GTF_GLOB_REF, though alisasedVar does assert((op1->gtFlags & GTF_GLOB_REF) == 0); tiRetVal = lvaTable[lclNum].lvVerTypeInfo; impPushOnStack(op1, tiRetVal); break; case CEE_ARGLIST: if (!info.compIsVarArgs) { BADCODE("arglist in non-vararg method"); } assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG); /* The ARGLIST cookie is a hidden 'last' parameter, we have already adjusted the arg count cos this is like fetching the last param */ assertImp(0 < numArgs); lclNum = lvaVarargsHandleArg; op1 = gtNewLclvNode(lclNum, TYP_I_IMPL DEBUGARG(opcodeOffs + sz + 1)); op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); impPushOnStack(op1, tiRetVal); break; case CEE_ENDFINALLY: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY); return; } if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } if (info.compXcptnsCount == 0) { BADCODE("endfinally outside finally"); } assert(verCurrentState.esStackDepth == 0); op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr); goto APPEND; case CEE_ENDFILTER: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER); return; } block->bbSetRunRarely(); // filters are rare if (info.compXcptnsCount == 0) { BADCODE("endfilter outside filter"); } op1 = impPopStack().val; assertImp(op1->gtType == TYP_INT); if (!bbInFilterILRange(block)) { BADCODE("EndFilter outside a filter handler"); } /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET); /* Mark catch handler as successor */ op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1); if (verCurrentState.esStackDepth != 0) { verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__) DEBUGARG(__LINE__)); } goto APPEND; case CEE_RET: prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it RET: if (!impReturnInstruction(prefixFlags, opcode)) { return; // abort } else { break; } case CEE_JMP: assert(!compIsForInlining()); if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex()) { /* CEE_JMP does not make sense in some "protected" regions. */ BADCODE("Jmp not allowed in protected region"); } if (opts.IsReversePInvoke()) { BADCODE("Jmp not allowed in reverse P/Invoke"); } if (verCurrentState.esStackDepth != 0) { BADCODE("Stack must be empty after CEE_JMPs"); } _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); /* The signature of the target has to be identical to ours. 
At least check that argCnt and returnType match */ eeGetMethodSig(resolvedToken.hMethod, &sig); if (sig.numArgs != info.compMethodInfo->args.numArgs || sig.retType != info.compMethodInfo->args.retType || sig.callConv != info.compMethodInfo->args.callConv) { BADCODE("Incompatible target for CEE_JMPs"); } op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod); /* Mark the basic block as being a JUMP instead of RETURN */ block->bbFlags |= BBF_HAS_JMP; /* Set this flag to make sure register arguments have a location assigned * even if we don't use them inside the method */ compJmpOpUsed = true; fgNoStructPromotion = true; goto APPEND; case CEE_LDELEMA: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a value class array we just do a simple address-of if (eeIsValueClass(ldelemClsHnd)) { CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd); if (cit == CORINFO_TYPE_UNDEF) { lclTyp = TYP_STRUCT; } else { lclTyp = JITtype2varType(cit); } goto ARR_LD_POST_VERIFY; } // Similarly, if its a readonly access, we can do a simple address-of // without doing a runtime type-check if (prefixFlags & PREFIX_READONLY) { lclTyp = TYP_REF; goto ARR_LD_POST_VERIFY; } // Otherwise we need the full helper function with run-time type check op1 = impTokenToHandle(&resolvedToken); if (op1 == nullptr) { // compDonotInline() return; } { GenTreeCall::Use* args = gtNewCallArgs(op1); // Type args = gtPrependNewCallArg(impPopStack().val, args); // index args = gtPrependNewCallArg(impPopStack().val, args); // array op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args); } impPushOnStack(op1, tiRetVal); break; // ldelem for reference and value types case CEE_LDELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a reference type or generic variable type // then just generate code as though it's a ldelem.ref instruction if (!eeIsValueClass(ldelemClsHnd)) { lclTyp = TYP_REF; opcode = CEE_LDELEM_REF; } else { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd); lclTyp = JITtype2varType(jitTyp); tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct tiRetVal.NormaliseForStack(); } goto ARR_LD_POST_VERIFY; case CEE_LDELEM_I1: lclTyp = TYP_BYTE; goto ARR_LD; case CEE_LDELEM_I2: lclTyp = TYP_SHORT; goto ARR_LD; case CEE_LDELEM_I: lclTyp = TYP_I_IMPL; goto ARR_LD; // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter // and treating it as TYP_INT avoids other asserts. 
case CEE_LDELEM_U4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I8: lclTyp = TYP_LONG; goto ARR_LD; case CEE_LDELEM_REF: lclTyp = TYP_REF; goto ARR_LD; case CEE_LDELEM_R4: lclTyp = TYP_FLOAT; goto ARR_LD; case CEE_LDELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_LD; case CEE_LDELEM_U1: lclTyp = TYP_UBYTE; goto ARR_LD; case CEE_LDELEM_U2: lclTyp = TYP_USHORT; goto ARR_LD; ARR_LD: ARR_LD_POST_VERIFY: /* Pull the index value and array address */ op2 = impPopStack().val; op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); /* Check for null pointer - in the inliner case we simply abort */ if (compIsForInlining()) { if (op1->gtOper == GT_CNS_INT) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM); return; } } /* Mark the block as containing an index expression */ if (op1->gtOper == GT_LCL_VAR) { if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node and push it on the stack */ op1 = gtNewIndexRef(lclTyp, op1, op2); ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT); if ((opcode == CEE_LDELEMA) || ldstruct || (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd))) { assert(ldelemClsHnd != DUMMY_INIT(NULL)); // remember the element size if (lclTyp == TYP_REF) { op1->AsIndex()->gtIndElemSize = TARGET_POINTER_SIZE; } else { // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type. if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF) { op1->AsIndex()->gtStructElemClass = ldelemClsHnd; } assert(lclTyp != TYP_STRUCT || op1->AsIndex()->gtStructElemClass != nullptr); if (lclTyp == TYP_STRUCT) { size = info.compCompHnd->getClassSize(ldelemClsHnd); op1->AsIndex()->gtIndElemSize = size; op1->gtType = lclTyp; } } if ((opcode == CEE_LDELEMA) || ldstruct) { // wrap it in a & lclTyp = TYP_BYREF; op1 = gtNewOperNode(GT_ADDR, lclTyp, op1); } else { assert(lclTyp != TYP_STRUCT); } } if (ldstruct) { // Create an OBJ for the result op1 = gtNewObjNode(ldelemClsHnd, op1); op1->gtFlags |= GTF_EXCEPT; } impPushOnStack(op1, tiRetVal); break; // stelem for reference and value types case CEE_STELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); stelemClsHnd = resolvedToken.hClass; // If it's a reference type just behave as though it's a stelem.ref instruction if (!eeIsValueClass(stelemClsHnd)) { goto STELEM_REF_POST_VERIFY; } // Otherwise extract the type { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd); lclTyp = JITtype2varType(jitTyp); goto ARR_ST_POST_VERIFY; } case CEE_STELEM_REF: STELEM_REF_POST_VERIFY: if (opts.OptimizationEnabled()) { GenTree* array = impStackTop(2).val; GenTree* value = impStackTop().val; // Is this a case where we can skip the covariant store check? 
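// (Illustrative cases: storing a null reference, or storing a value whose exact
//  class is known to match the array's element type -- e.g. `strArr[i] = s` when
//  both sides are known to be string -- cannot fail the covariance check, so the
//  CORINFO_HELP_ARRADDR_ST helper below can be skipped in favor of a direct
//  assignment.)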
if (impCanSkipCovariantStoreCheck(value, array)) { lclTyp = TYP_REF; goto ARR_ST_POST_VERIFY; } } // Else call a helper function to do the assignment op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopCallArgs(3, nullptr)); goto SPILL_APPEND; case CEE_STELEM_I1: lclTyp = TYP_BYTE; goto ARR_ST; case CEE_STELEM_I2: lclTyp = TYP_SHORT; goto ARR_ST; case CEE_STELEM_I: lclTyp = TYP_I_IMPL; goto ARR_ST; case CEE_STELEM_I4: lclTyp = TYP_INT; goto ARR_ST; case CEE_STELEM_I8: lclTyp = TYP_LONG; goto ARR_ST; case CEE_STELEM_R4: lclTyp = TYP_FLOAT; goto ARR_ST; case CEE_STELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_ST; ARR_ST: ARR_ST_POST_VERIFY: /* The strict order of evaluation is LHS-operands, RHS-operands, range-check, and then assignment. However, codegen currently does the range-check before evaluation the RHS-operands. So to maintain strict ordering, we spill the stack. */ if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Strict ordering of exceptions for Array store")); } /* Pull the new value from the stack */ op2 = impPopStack().val; /* Pull the index value */ op1 = impPopStack().val; /* Pull the array address */ op3 = impPopStack().val; assertImp(op3->gtType == TYP_REF); if (op2->IsLocalAddrExpr() != nullptr) { op2->gtType = TYP_I_IMPL; } // Mark the block as containing an index expression if (op3->gtOper == GT_LCL_VAR) { if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node */ op1 = gtNewIndexRef(lclTyp, op3, op1); /* Create the assignment node and append it */ if (lclTyp == TYP_STRUCT) { assert(stelemClsHnd != DUMMY_INIT(NULL)); op1->AsIndex()->gtStructElemClass = stelemClsHnd; op1->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd); } if (varTypeIsStruct(op1)) { op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL); } else { op2 = impImplicitR4orR8Cast(op2, op1->TypeGet()); op1 = gtNewAssignNode(op1, op2); } /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; goto SPILL_APPEND; case CEE_ADD: oper = GT_ADD; goto MATH_OP2; case CEE_ADD_OVF: uns = false; goto ADD_OVF; case CEE_ADD_OVF_UN: uns = true; goto ADD_OVF; ADD_OVF: ovfl = true; callNode = false; oper = GT_ADD; goto MATH_OP2_FLAGS; case CEE_SUB: oper = GT_SUB; goto MATH_OP2; case CEE_SUB_OVF: uns = false; goto SUB_OVF; case CEE_SUB_OVF_UN: uns = true; goto SUB_OVF; SUB_OVF: ovfl = true; callNode = false; oper = GT_SUB; goto MATH_OP2_FLAGS; case CEE_MUL: oper = GT_MUL; goto MATH_MAYBE_CALL_NO_OVF; case CEE_MUL_OVF: uns = false; goto MUL_OVF; case CEE_MUL_OVF_UN: uns = true; goto MUL_OVF; MUL_OVF: ovfl = true; oper = GT_MUL; goto MATH_MAYBE_CALL_OVF; // Other binary math operations case CEE_DIV: oper = GT_DIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_DIV_UN: oper = GT_UDIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM: oper = GT_MOD; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM_UN: oper = GT_UMOD; goto MATH_MAYBE_CALL_NO_OVF; MATH_MAYBE_CALL_NO_OVF: ovfl = false; MATH_MAYBE_CALL_OVF: // Morpher has some complex logic about when to turn different // typed nodes on different platforms into helper calls. We // need to either duplicate that logic here, or just // pessimistically make all the nodes large enough to become // call nodes. Since call nodes aren't that much larger and // these opcodes are infrequent enough I chose the latter. 
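// (Illustrative case: a 64-bit GT_DIV on a 32-bit target, or integer division
//  on ARM cores without a divide instruction, may later be morphed into a helper
//  call, so the node allocated below must already be big enough to be rewritten
//  into a GT_CALL in place.)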
callNode = true; goto MATH_OP2_FLAGS; case CEE_AND: oper = GT_AND; goto MATH_OP2; case CEE_OR: oper = GT_OR; goto MATH_OP2; case CEE_XOR: oper = GT_XOR; goto MATH_OP2; MATH_OP2: // For default values of 'ovfl' and 'callNode' ovfl = false; callNode = false; MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set /* Pull two values and push back the result */ op2 = impPopStack().val; op1 = impPopStack().val; /* Can't do arithmetic with references */ assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF); // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if its a true byref, only // if it is in the stack) impBashVarAddrsToI(op1, op2); type = impGetByRefResultType(oper, uns, &op1, &op2); assert(!ovfl || !varTypeIsFloating(op1->gtType)); /* Special case: "int+0", "int-0", "int*1", "int/1" */ if (op2->gtOper == GT_CNS_INT) { if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) || (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV))) { impPushOnStack(op1, tiRetVal); break; } } // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand // if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { if (op1->TypeGet() != type) { // We insert a cast of op1 to 'type' op1 = gtNewCastNode(type, op1, false, type); } if (op2->TypeGet() != type) { // We insert a cast of op2 to 'type' op2 = gtNewCastNode(type, op2, false, type); } } if (callNode) { /* These operators can later be transformed into 'GT_CALL' */ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]); #ifndef TARGET_ARM assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]); #endif // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying // that we'll need to transform into a general large node, but rather specifically // to a call: by doing it this way, things keep working if there are multiple sizes, // and a CALL is no longer the largest. // That said, as of now it *is* a large node, so we'll do this with an assert rather // than an "if". 
assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE); op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true)); } else { op1 = gtNewOperNode(oper, type, op1, op2); } /* Special case: integer/long division may throw an exception */ if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this)) { op1->gtFlags |= GTF_EXCEPT; } if (ovfl) { assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL); if (ovflType != TYP_UNKNOWN) { op1->gtType = ovflType; } op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } } impPushOnStack(op1, tiRetVal); break; case CEE_SHL: oper = GT_LSH; goto CEE_SH_OP2; case CEE_SHR: oper = GT_RSH; goto CEE_SH_OP2; case CEE_SHR_UN: oper = GT_RSZ; goto CEE_SH_OP2; CEE_SH_OP2: op2 = impPopStack().val; op1 = impPopStack().val; // operand to be shifted impBashVarAddrsToI(op1, op2); type = genActualType(op1->TypeGet()); op1 = gtNewOperNode(oper, type, op1, op2); impPushOnStack(op1, tiRetVal); break; case CEE_NOT: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); type = genActualType(op1->TypeGet()); impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal); break; case CEE_CKFINITE: op1 = impPopStack().val; type = op1->TypeGet(); op1 = gtNewOperNode(GT_CKFINITE, type, op1); op1->gtFlags |= GTF_EXCEPT; impPushOnStack(op1, tiRetVal); break; case CEE_LEAVE: val = getI4LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val); goto LEAVE; case CEE_LEAVE_S: val = getI1LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val); LEAVE: if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE); return; } JITDUMP(" %04X", jmpAddr); if (block->bbJumpKind != BBJ_LEAVE) { impResetLeaveBlock(block, jmpAddr); } assert(jmpAddr == block->bbJumpDest->bbCodeOffs); impImportLeave(block); impNoteBranchOffs(); break; case CEE_BR: case CEE_BR_S: jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0) { break; /* NOP */ } impNoteBranchOffs(); break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: /* Pop the comparand (now there's a neat term) from the stack */ op1 = impPopStack().val; type = op1->TypeGet(); // Per Ecma-355, brfalse and brtrue are only specified for nint, ref, and byref. // // We've historically been a bit more permissive, so here we allow // any type that gtNewZeroConNode can handle. if (!varTypeIsArithmetic(type) && !varTypeIsGC(type)) { BADCODE("invalid type for brtrue/brfalse"); } if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { op1 = gtUnusedValNode(op1); goto SPILL_APPEND; } else { break; } } if (op1->OperIsCompare()) { if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S) { // Flip the sense of the compare op1 = gtReverseCond(op1); } } else { // We'll compare against an equally-sized integer 0 // For small types, we always compare against int op2 = gtNewZeroConNode(genActualType(op1->gtType)); // Create the comparison operator and try to fold it oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? 
GT_NE : GT_EQ; op1 = gtNewOperNode(oper, TYP_INT, op1, op2); } // fall through COND_JUMP: /* Fold comparison if we can */ op1 = gtFoldExpr(op1); /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/ /* Don't make any blocks unreachable in import only mode */ if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly()) { /* gtFoldExpr() should prevent this as we don't want to make any blocks unreachable under compDbgCode */ assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); assertImp((block->bbJumpKind == BBJ_COND) // normal case || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the // block for the second time block->bbJumpKind = foldedJumpKind; #ifdef DEBUG if (verbose) { if (op1->AsIntCon()->gtIconVal) { printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->bbJumpDest->bbNum); } else { printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum); } } #endif break; } op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1); /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt' in impImportBlock(block). For correct line numbers, spill stack. */ if (opts.compDbgCode && impCurStmtDI.IsValid()) { impSpillStackEnsure(true); } goto SPILL_APPEND; case CEE_CEQ: oper = GT_EQ; uns = false; goto CMP_2_OPs; case CEE_CGT_UN: oper = GT_GT; uns = true; goto CMP_2_OPs; case CEE_CGT: oper = GT_GT; uns = false; goto CMP_2_OPs; case CEE_CLT_UN: oper = GT_LT; uns = true; goto CMP_2_OPs; case CEE_CLT: oper = GT_LT; uns = false; goto CMP_2_OPs; CMP_2_OPs: op2 = impPopStack().val; op1 = impPopStack().val; // Recognize the IL idiom of CGT_UN(op1, 0) and normalize // it so that downstream optimizations don't have to. if ((opcode == CEE_CGT_UN) && op2->IsIntegralConst(0)) { oper = GT_NE; uns = false; } #ifdef TARGET_64BIT // TODO-Casts: create a helper that upcasts int32 -> native int when necessary. // See also identical code in impGetByRefResultType and STSFLD import. if (varTypeIsI(op1) && (genActualType(op2) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, TYP_I_IMPL); } else if (varTypeIsI(op2) && (genActualType(op1) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1) == genActualType(op2) || (varTypeIsI(op1) && varTypeIsI(op2)) || (varTypeIsFloating(op1) && varTypeIsFloating(op2))); // Create the comparison node. op1 = gtNewOperNode(oper, TYP_INT, op1, op2); // TODO: setting both flags when only one is appropriate. if (uns) { op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED; } // Fold result, if possible. 
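// (For example -- illustrative -- `ldc.i4.1; ldc.i4.2; clt` folds here to the
//  integer constant 1, so a constant rather than a relop is pushed on the stack.)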
op1 = gtFoldExpr(op1); impPushOnStack(op1, tiRetVal); break; case CEE_BEQ_S: case CEE_BEQ: oper = GT_EQ; goto CMP_2_OPs_AND_BR; case CEE_BGE_S: case CEE_BGE: oper = GT_GE; goto CMP_2_OPs_AND_BR; case CEE_BGE_UN_S: case CEE_BGE_UN: oper = GT_GE; goto CMP_2_OPs_AND_BR_UN; case CEE_BGT_S: case CEE_BGT: oper = GT_GT; goto CMP_2_OPs_AND_BR; case CEE_BGT_UN_S: case CEE_BGT_UN: oper = GT_GT; goto CMP_2_OPs_AND_BR_UN; case CEE_BLE_S: case CEE_BLE: oper = GT_LE; goto CMP_2_OPs_AND_BR; case CEE_BLE_UN_S: case CEE_BLE_UN: oper = GT_LE; goto CMP_2_OPs_AND_BR_UN; case CEE_BLT_S: case CEE_BLT: oper = GT_LT; goto CMP_2_OPs_AND_BR; case CEE_BLT_UN_S: case CEE_BLT_UN: oper = GT_LT; goto CMP_2_OPs_AND_BR_UN; case CEE_BNE_UN_S: case CEE_BNE_UN: oper = GT_NE; goto CMP_2_OPs_AND_BR_UN; CMP_2_OPs_AND_BR_UN: uns = true; unordered = true; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR: uns = false; unordered = false; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR_ALL: /* Pull two values */ op2 = impPopStack().val; op1 = impPopStack().val; #ifdef TARGET_64BIT if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op1 side effect")); impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } if (op2->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op2 side effect")); impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } #ifdef DEBUG if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT) { impNoteLastILoffs(); } #endif break; } // We can generate an compare of different sized floating point op1 and op2 // We insert a cast // if (varTypeIsFloating(op1->TypeGet())) { if (op1->TypeGet() != op2->TypeGet()) { assert(varTypeIsFloating(op2->TypeGet())); // say op1=double, op2=float. To avoid loss of precision // while comparing, op2 is converted to double and double // comparison is done. 
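// (Illustrative tree shape for that case, using GT_LT as the relop:
//
//              GT_LT
//              /    \.
//       op1:double  CAST double
//                       |
//                   op2:float
//
//  so both operands of the comparison end up as TYP_DOUBLE.)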
if (op1->TypeGet() == TYP_DOUBLE) { // We insert a cast of op2 to TYP_DOUBLE op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_DOUBLE) { // We insert a cast of op1 to TYP_DOUBLE op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } } } /* Create and append the operator */ op1 = gtNewOperNode(oper, TYP_INT, op1, op2); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } if (unordered) { op1->gtFlags |= GTF_RELOP_NAN_UN; } goto COND_JUMP; case CEE_SWITCH: /* Pop the switch value off the stack */ op1 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op1->TypeGet())); /* We can create a switch node */ op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1); val = (int)getU4LittleEndian(codeAddr); codeAddr += 4 + val * 4; // skip over the switch-table goto SPILL_APPEND; /************************** Casting OPCODES ***************************/ case CEE_CONV_OVF_I1: lclTyp = TYP_BYTE; goto CONV_OVF; case CEE_CONV_OVF_I2: lclTyp = TYP_SHORT; goto CONV_OVF; case CEE_CONV_OVF_I: lclTyp = TYP_I_IMPL; goto CONV_OVF; case CEE_CONV_OVF_I4: lclTyp = TYP_INT; goto CONV_OVF; case CEE_CONV_OVF_I8: lclTyp = TYP_LONG; goto CONV_OVF; case CEE_CONV_OVF_U1: lclTyp = TYP_UBYTE; goto CONV_OVF; case CEE_CONV_OVF_U2: lclTyp = TYP_USHORT; goto CONV_OVF; case CEE_CONV_OVF_U: lclTyp = TYP_U_IMPL; goto CONV_OVF; case CEE_CONV_OVF_U4: lclTyp = TYP_UINT; goto CONV_OVF; case CEE_CONV_OVF_U8: lclTyp = TYP_ULONG; goto CONV_OVF; case CEE_CONV_OVF_I1_UN: lclTyp = TYP_BYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_I2_UN: lclTyp = TYP_SHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_I_UN: lclTyp = TYP_I_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_I4_UN: lclTyp = TYP_INT; goto CONV_OVF_UN; case CEE_CONV_OVF_I8_UN: lclTyp = TYP_LONG; goto CONV_OVF_UN; case CEE_CONV_OVF_U1_UN: lclTyp = TYP_UBYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_U2_UN: lclTyp = TYP_USHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_U_UN: lclTyp = TYP_U_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_U4_UN: lclTyp = TYP_UINT; goto CONV_OVF_UN; case CEE_CONV_OVF_U8_UN: lclTyp = TYP_ULONG; goto CONV_OVF_UN; CONV_OVF_UN: uns = true; goto CONV_OVF_COMMON; CONV_OVF: uns = false; goto CONV_OVF_COMMON; CONV_OVF_COMMON: ovfl = true; goto _CONV; case CEE_CONV_I1: lclTyp = TYP_BYTE; goto CONV; case CEE_CONV_I2: lclTyp = TYP_SHORT; goto CONV; case CEE_CONV_I: lclTyp = TYP_I_IMPL; goto CONV; case CEE_CONV_I4: lclTyp = TYP_INT; goto CONV; case CEE_CONV_I8: lclTyp = TYP_LONG; goto CONV; case CEE_CONV_U1: lclTyp = TYP_UBYTE; goto CONV; case CEE_CONV_U2: lclTyp = TYP_USHORT; goto CONV; #if (REGSIZE_BYTES == 8) case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV_UN; #else case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV; #endif case CEE_CONV_U4: lclTyp = TYP_UINT; goto CONV; case CEE_CONV_U8: lclTyp = TYP_ULONG; goto CONV_UN; case CEE_CONV_R4: lclTyp = TYP_FLOAT; goto CONV; case CEE_CONV_R8: lclTyp = TYP_DOUBLE; goto CONV; case CEE_CONV_R_UN: lclTyp = TYP_DOUBLE; goto CONV_UN; CONV_UN: uns = true; ovfl = false; goto _CONV; CONV: uns = false; ovfl = false; goto _CONV; _CONV: // only converts from FLOAT or DOUBLE to an integer type // and converts from ULONG (or LONG on ARM) to DOUBLE are morphed to calls if (varTypeIsFloating(lclTyp)) { callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl #ifdef TARGET_64BIT // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? // TYP_BYREF could be used as TYP_I_IMPL which is long. 
// TODO-CQ: remove this when we lower casts long/ulong --> float/double // and generate SSE2 code instead of going through helper calls. || (impStackTop().val->TypeGet() == TYP_BYREF) #endif ; } else { callNode = varTypeIsFloating(impStackTop().val->TypeGet()); } op1 = impPopStack().val; impBashVarAddrsToI(op1); // Casts from floating point types must not have GTF_UNSIGNED set. if (varTypeIsFloating(op1)) { uns = false; } // At this point uns, ovf, callNode are all set. if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND) { op2 = op1->AsOp()->gtOp2; if (op2->gtOper == GT_CNS_INT) { ssize_t ival = op2->AsIntCon()->gtIconVal; ssize_t mask, umask; switch (lclTyp) { case TYP_BYTE: case TYP_UBYTE: mask = 0x00FF; umask = 0x007F; break; case TYP_USHORT: case TYP_SHORT: mask = 0xFFFF; umask = 0x7FFF; break; default: assert(!"unexpected type"); return; } if (((ival & umask) == ival) || ((ival & mask) == ival && uns)) { /* Toss the cast, it's a waste of time */ impPushOnStack(op1, tiRetVal); break; } else if (ival == mask) { /* Toss the masking, it's a waste of time, since we sign-extend from the small value anyways */ op1 = op1->AsOp()->gtOp1; } } } /* The 'op2' sub-operand of a cast is the 'real' type number, since the result of a cast to one of the 'small' integer types is an integer. */ type = genActualType(lclTyp); // If this is a no-op cast, just use op1. if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp))) { // Nothing needs to change } // Work is evidently required, add cast node else { if (callNode) { op1 = gtNewCastNodeL(type, op1, uns, lclTyp); } else { op1 = gtNewCastNode(type, op1, uns, lclTyp); } if (ovfl) { op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT); } if (op1->gtGetOp1()->OperIsConst() && opts.OptimizationEnabled()) { // Try and fold the introduced cast op1 = gtFoldExprConst(op1); } } impPushOnStack(op1, tiRetVal); break; case CEE_NEG: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal); break; case CEE_POP: { /* Pull the top value from the stack */ StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; /* Get hold of the type of the value being duplicated */ lclTyp = genActualType(op1->gtType); /* Does the value have any side effects? */ if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode) { // Since we are throwing away the value, just normalize // it to its address. This is more efficient. if (varTypeIsStruct(op1)) { JITDUMP("\n ... CEE_POP struct ...\n"); DISPTREE(op1); #ifdef UNIX_AMD64_ABI // Non-calls, such as obj or ret_expr, have to go through this. // Calls with large struct return value have to go through this. // Helper calls with small struct return value also have to go // through this since they do not follow Unix calling convention. if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) || op1->AsCall()->gtCallType == CT_HELPER) #endif // UNIX_AMD64_ABI { // If the value being produced comes from loading // via an underlying address, just null check the address. if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ)) { gtChangeOperToNullCheck(op1, block); } else { op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false); } JITDUMP("\n ... optimized to ...\n"); DISPTREE(op1); } } // If op1 is non-overflow cast, throw it away since it is useless. 
// Another reason for throwing away the useless cast is in the context of // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)). // The cast gets added as part of importing GT_CALL, which gets in the way // of fgMorphCall() on the forms of tail call nodes that we assert. if ((op1->gtOper == GT_CAST) && !op1->gtOverflow()) { op1 = op1->AsOp()->gtOp1; } if (op1->gtOper != GT_CALL) { if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0) { op1 = gtUnusedValNode(op1); } else { // Can't bash to NOP here because op1 can be referenced from `currentBlock->bbEntryState`, // if we ever need to reimport we need a valid LCL_VAR on it. op1 = gtNewNothingNode(); } } /* Append the value to the tree list */ goto SPILL_APPEND; } /* No side effects - just throw the <BEEP> thing away */ } break; case CEE_DUP: { StackEntry se = impPopStack(); GenTree* tree = se.val; tiRetVal = se.seTypeInfo; op1 = tree; // If the expression to dup is simple, just clone it. // Otherwise spill it to a temp, and reload the temp twice. bool cloneExpr = false; if (!opts.compDbgCode) { // Duplicate 0 and +0.0 if (op1->IsIntegralConst(0) || op1->IsFloatPositiveZero()) { cloneExpr = true; } // Duplicate locals and addresses of them else if (op1->IsLocal()) { cloneExpr = true; } else if (op1->TypeIs(TYP_BYREF) && op1->OperIs(GT_ADDR) && op1->gtGetOp1()->IsLocal() && (OPCODE)impGetNonPrefixOpcode(codeAddr + sz, codeEndp) != CEE_INITOBJ) { cloneExpr = true; } } else { // Always clone for debug mode cloneExpr = true; } if (!cloneExpr) { const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill")); impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types type = genActualType(lvaTable[tmpNum].TypeGet()); op1 = gtNewLclvNode(tmpNum, type); // Propagate type info to the temp from the stack and the original tree if (type == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", tmpNum); lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle()); } } op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("DUP instruction")); assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT)); impPushOnStack(op1, tiRetVal); impPushOnStack(op2, tiRetVal); } break; case CEE_STIND_I1: lclTyp = TYP_BYTE; goto STIND; case CEE_STIND_I2: lclTyp = TYP_SHORT; goto STIND; case CEE_STIND_I4: lclTyp = TYP_INT; goto STIND; case CEE_STIND_I8: lclTyp = TYP_LONG; goto STIND; case CEE_STIND_I: lclTyp = TYP_I_IMPL; goto STIND; case CEE_STIND_REF: lclTyp = TYP_REF; goto STIND; case CEE_STIND_R4: lclTyp = TYP_FLOAT; goto STIND; case CEE_STIND_R8: lclTyp = TYP_DOUBLE; goto STIND; STIND: op2 = impPopStack().val; // value to store op1 = impPopStack().val; // address to store to // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); impBashVarAddrsToI(op1, op2); op2 = impImplicitR4orR8Cast(op2, lclTyp); #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if 
(varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // TARGET_64BIT if (opcode == CEE_STIND_REF) { // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType)); lclTyp = genActualType(op2->TypeGet()); } // Check target type. #ifdef DEBUG if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF) { if (op2->gtType == TYP_BYREF) { assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL); } else if (lclTyp == TYP_BYREF) { assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType)); } } else { assertImp(genActualType(op2->gtType) == genActualType(lclTyp) || ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp))); } #endif op1 = gtNewOperNode(GT_IND, lclTyp, op1); // stind could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE; if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } op1 = gtNewAssignNode(op1, op2); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; // Spill side-effects AND global-data-accesses if (verCurrentState.esStackDepth > 0) { impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND")); } goto APPEND; case CEE_LDIND_I1: lclTyp = TYP_BYTE; goto LDIND; case CEE_LDIND_I2: lclTyp = TYP_SHORT; goto LDIND; case CEE_LDIND_U4: case CEE_LDIND_I4: lclTyp = TYP_INT; goto LDIND; case CEE_LDIND_I8: lclTyp = TYP_LONG; goto LDIND; case CEE_LDIND_REF: lclTyp = TYP_REF; goto LDIND; case CEE_LDIND_I: lclTyp = TYP_I_IMPL; goto LDIND; case CEE_LDIND_R4: lclTyp = TYP_FLOAT; goto LDIND; case CEE_LDIND_R8: lclTyp = TYP_DOUBLE; goto LDIND; case CEE_LDIND_U1: lclTyp = TYP_UBYTE; goto LDIND; case CEE_LDIND_U2: lclTyp = TYP_USHORT; goto LDIND; LDIND: op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); #ifdef TARGET_64BIT // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (genActualType(op1->gtType) == TYP_INT) { op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL); } #endif assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, lclTyp, op1); // ldind could point anywhere, example a boxed class static int op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; case CEE_UNALIGNED: assert(sz == 1); val = getU1LittleEndian(codeAddr); ++codeAddr; JITDUMP(" %u", val); if ((val != 1) && (val != 2) && (val != 4)) { BADCODE("Alignment unaligned. must be 1, 2, or 4"); } Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. 
prefixes"); prefixFlags |= PREFIX_UNALIGNED; impValidateMemoryAccessOpcode(codeAddr, codeEndp, false); PREFIX: opcode = (OPCODE)getU1LittleEndian(codeAddr); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; case CEE_VOLATILE: Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes"); prefixFlags |= PREFIX_VOLATILE; impValidateMemoryAccessOpcode(codeAddr, codeEndp, true); assert(sz == 0); goto PREFIX; case CEE_LDFTN: { // Need to do a lookup here so that we perform an access check // and do a NOWAY if protections are violated _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); DO_LDFTN: op1 = impMethodPointer(&resolvedToken, &callInfo); if (compDonotInline()) { return; } // Call info may have more precise information about the function than // the resolved token. CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(callInfo.hMethod != nullptr); heapToken->hMethod = callInfo.hMethod; impPushOnStack(op1, typeInfo(heapToken)); break; } case CEE_LDVIRTFTN: { /* Get the method token */ _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */, combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), CORINFO_CALLINFO_CALLVIRT), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } mflags = callInfo.methodFlags; impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); if (compIsForInlining()) { if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL); return; } } CORINFO_SIG_INFO& ftnSig = callInfo.sig; /* Get the object-ref */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); if (opts.IsReadyToRun()) { if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } } else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo); if (compDonotInline()) { return; } CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(heapToken->tokenType == CORINFO_TOKENKIND_Method); assert(callInfo.hMethod != nullptr); heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn; heapToken->hMethod = callInfo.hMethod; impPushOnStack(fptr, typeInfo(heapToken)); break; } case CEE_CONSTRAINED: assertImp(sz == sizeof(unsigned)); impResolveToken(codeAddr, &constrainedResolvedToken, 
CORINFO_TOKENKIND_Constrained); codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually JITDUMP(" (%08X) ", constrainedResolvedToken.token); Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes"); prefixFlags |= PREFIX_CONSTRAINED; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN) { BADCODE("constrained. has to be followed by callvirt, call or ldftn"); } } goto PREFIX; case CEE_READONLY: JITDUMP(" readonly."); Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes"); prefixFlags |= PREFIX_READONLY; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("readonly. has to be followed by ldelema or call"); } } assert(sz == 0); goto PREFIX; case CEE_TAILCALL: JITDUMP(" tail."); Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("tailcall. has to be followed by call, callvirt or calli"); } } assert(sz == 0); goto PREFIX; case CEE_NEWOBJ: /* Since we will implicitly insert newObjThisPtr at the start of the argument list, spill any GTF_ORDER_SIDEEFF */ impSpillSpecialSideEff(); /* NEWOBJ does not respond to TAIL */ prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT; /* NEWOBJ does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; _impResolveToken(CORINFO_TOKENKIND_NewObj); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM), &callInfo); mflags = callInfo.methodFlags; if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0) { BADCODE("newobj on static or abstract method"); } // Insert the security callout before any actual code is generated impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); // There are three different cases for new // Object size is variable (depends on arguments) // 1) Object is an array (arrays treated specially by the EE) // 2) Object is some other variable sized object (e.g. String) // 3) Class Size can be determined beforehand (normal case) // In the first case, we need to call a NEWOBJ helper (multinewarray) // in the second case we call the constructor with a '0' this pointer // In the third case we alloc the memory, then call the constuctor clsFlags = callInfo.classFlags; if (clsFlags & CORINFO_FLG_ARRAY) { // Arrays need to call the NEWOBJ helper. assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE); impImportNewObjArray(&resolvedToken, &callInfo); if (compDonotInline()) { return; } callTyp = TYP_REF; break; } // At present this can only be String else if (clsFlags & CORINFO_FLG_VAROBJSIZE) { // Skip this thisPtr argument newObjThisPtr = nullptr; /* Remember that this basic block contains 'new' of an object */ block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; } else { // This is the normal case where the size of the object is // fixed. Allocate the memory and call the constructor. 
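                    // Illustrative sketch (hypothetical type name, not from the original source): for
                    //     newobj instance void SomeClass::.ctor(int32)
                    // the code below grabs a temp and produces roughly
                    //     tmp = ALLOCOBJ(SomeClass handle)            (reference types)
                    // or a zero-initialized local whose address is taken (value classes), and then
                    // jumps to the shared CALL import path with 'tmp' / '&tmp' as newObjThisPtr.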
// Note: We cannot add a peep to avoid use of temp here // becase we don't have enough interference info to detect when // sources and destination interfere, example: s = new S(ref); // TODO: We find the correct place to introduce a general // reverse copy prop for struct return values from newobj or // any function returning structs. /* get a temporary for the new object */ lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp")); if (compDonotInline()) { // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS. assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } // In the value class case we only need clsHnd for size calcs. // // The lookup of the code pointer will be handled by CALL in this case if (clsFlags & CORINFO_FLG_VALUECLASS) { if (compIsForInlining()) { // If value class has GC fields, inform the inliner. It may choose to // bail out on the inline. DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; // some policies do not track the relative hotness of call sites for // "always" inline cases. if (impInlineInfo->iciBlock->isRunRarely()) { compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } } } } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { lvaTable[lclNum].lvType = JITtype2varType(jitTyp); } else { // The local variable itself is the allocated space. // Here we need unsafe value cls check, since the address of struct is taken for further use // and potentially exploitable. lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */); } bool bbInALoop = impBlockIsInALoop(block); bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) && (!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN)); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { // Append a tree to zero-out the temp newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet()); newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } else { JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", lclNum); lclDsc->lvSuppressedZeroInit = 1; compSuppressedZeroInit = true; } // Obtain the address of the temp newObjThisPtr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet())); } else { // If we're newing up a finalizable object, spill anything that can cause exceptions. // bool hasSideEffects = false; CorInfoHelpFunc newHelper = info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd, &hasSideEffects); if (hasSideEffects) { JITDUMP("\nSpilling stack for finalizable newobj\n"); impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill")); } const bool useParent = true; op1 = gtNewAllocObjNode(&resolvedToken, useParent); if (op1 == nullptr) { return; } // Remember that this basic block contains 'new' of an object block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Append the assignment to the temp/local. 
Dont need to spill // at all as we are just calling an EE-Jit helper which can only // cause an (async) OutOfMemoryException. // We assign the newly allocated object (by a GT_ALLOCOBJ node) // to a temp. Note that the pattern "temp = allocObj" is required // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes // without exhaustive walk over all expressions. impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE); assert(lvaTable[lclNum].lvSingleDef == 0); lvaTable[lclNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", lclNum); lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */); newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF); } } goto CALL; case CEE_CALLI: /* CALLI does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; FALLTHROUGH; case CEE_CALLVIRT: case CEE_CALL: // We can't call getCallInfo on the token from a CALLI, but we need it in // many other places. We unfortunately embed that knowledge here. if (opcode != CEE_CALLI) { _impResolveToken(CORINFO_TOKENKIND_Method); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, // this is how impImportCall invokes getCallInfo combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT : CORINFO_CALLINFO_NONE), &callInfo); } else { // Suppress uninitialized use warning. memset(&resolvedToken, 0, sizeof(resolvedToken)); memset(&callInfo, 0, sizeof(callInfo)); resolvedToken.token = getU4LittleEndian(codeAddr); resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; } CALL: // memberRef should be set. // newObjThisPtr should be set for CEE_NEWOBJ JITDUMP(" %08X", resolvedToken.token); constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0; bool newBBcreatedForTailcallStress; bool passedStressModeValidation; newBBcreatedForTailcallStress = false; passedStressModeValidation = true; if (compIsForInlining()) { if (compDonotInline()) { return; } // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks. assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0); } else { if (compTailCallStress()) { // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()? // Tail call stress only recognizes call+ret patterns and forces them to be // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress // doesn't import 'ret' opcode following the call into the basic block containing // the call instead imports it to a new basic block. Note that fgMakeBasicBlocks() // is already checking that there is an opcode following call and hence it is // safe here to read next opcode without bounds check. newBBcreatedForTailcallStress = impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't // make it jump to RET. (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT); if (newBBcreatedForTailcallStress && !hasTailPrefix) { // Do a more detailed evaluation of legality const bool returnFalseIfInvalid = true; const bool passedConstraintCheck = verCheckTailCallConstraint(opcode, &resolvedToken, constraintCall ? 
&constrainedResolvedToken : nullptr, returnFalseIfInvalid); if (passedConstraintCheck) { // Now check with the runtime CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod; bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) || (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE); CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd, hasTailPrefix)) // Is it legal to do tailcall? { // Stress the tailcall. JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; prefixFlags |= PREFIX_TAILCALL_STRESS; } else { // Runtime disallows this tail call JITDUMP(" (Tailcall stress: runtime preventing tailcall)"); passedStressModeValidation = false; } } else { // Constraints disallow this tail call JITDUMP(" (Tailcall stress: constraint check failed)"); passedStressModeValidation = false; } } } } // This is split up to avoid goto flow warnings. bool isRecursive; isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd); // If we've already disqualified this call as a tail call under tail call stress, // don't consider it for implicit tail calling either. // // When not running under tail call stress, we may mark this call as an implicit // tail call candidate. We'll do an "equivalent" validation during impImportCall. // // Note that when running under tail call stress, a call marked as explicit // tail prefixed will not be considered for implicit tail calling. if (passedStressModeValidation && impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive)) { if (compIsForInlining()) { #if FEATURE_TAILCALL_OPT_SHARED_RETURN // Are we inlining at an implicit tail call site? If so the we can flag // implicit tail call sites in the inline body. These call sites // often end up in non BBJ_RETURN blocks, so only flag them when // we're able to handle shared returns. if (impInlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("\n (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN } else { JITDUMP("\n (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } } // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call). explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0; readonlyCall = (prefixFlags & PREFIX_READONLY) != 0; if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ) { // All calls and delegates need a security callout. // For delegates, this is the call to the delegate constructor, not the access check on the // LD(virt)FTN. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); } callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr, newObjThisPtr, prefixFlags, &callInfo, opcodeOffs); if (compDonotInline()) { // We do not check fails after lvaGrabTemp. It is covered with CoreCLR_13272 issue. assert((callTyp == TYP_UNDEF) || (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS)); return; } if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we // have created a new BB after the "call" // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless. 
{ assert(!compIsForInlining()); goto RET; } break; case CEE_LDFLD: case CEE_LDSFLD: case CEE_LDFLDA: case CEE_LDSFLDA: { bool isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA); bool isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA); /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; CORINFO_CLASS_HANDLE objType = nullptr; // used for fields if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA) { tiObj = &impStackTop().seTypeInfo; StackEntry se = impPopStack(); objType = se.seTypeInfo.GetClassHandle(); obj = se.val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; clsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER); return; default: break; } if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT && clsHnd) { if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) && !(info.compFlags & CORINFO_FLG_FORCEINLINE)) { // Loading a static valuetype field usually will cause a JitHelper to be called // for the static base. This will bloat the code. compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS); if (compInlineResult->IsFailure()) { return; } } } } tiRetVal = verMakeTypeInfo(ciType, clsHnd); if (isLoadAddress) { tiRetVal.MakeByRef(); } else { tiRetVal.NormaliseForStack(); } // Perform this check always to ensure that we get field access exceptions even with // SkipVerification. impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static load accesses non-static field if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj. if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } bool usesHelper = false; switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { // If the object is a struct, what we really want is // for the field to operate on the address of the struct. 
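                        // For illustration (hypothetical names): "ldfld int32 MyStruct::_x" with a
                        // MyStruct value on the IL stack becomes a GT_FIELD over the struct's
                        // address, which impGetStructAddr materializes below.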
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj)) { assert(opcode == CEE_LDFLD && objType != nullptr); obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true); } /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If the object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } // wrap it in a address of operator if necessary if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1); } else { if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1); } break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, nullptr); usesHelper = true; break; case CORINFO_FIELD_STATIC_ADDRESS: // Replace static read-only fields with constant if possible if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) && !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) && (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp))) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd, impTokenLookupContextHandle); if (initClassResult & CORINFO_INITCLASS_INITIALIZED) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr); // We should always be able to access this static's address directly // assert(pFldAddr == nullptr); op1 = impImportStaticReadOnlyField(fldAddr, lclTyp); // Widen small types since we're propagating the value // instead of producing an indir. 
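                        // Hypothetical example: a "static readonly int" on a class the VM reports
                        // as already initialized is imported as a GT_CNS_INT holding the field's
                        // value, so no indirection remains in the generated code.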
// op1->gtType = genActualType(lclTyp); goto FIELD_DONE; } } FALLTHROUGH; case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; case CORINFO_FIELD_INTRINSIC_ZERO: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); op1 = gtNewIconNode(0, lclTyp); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_EMPTY_STRING: { assert(aflags & CORINFO_ACCESS_GET); // Import String.Empty as "" (GT_CNS_STR with a fake SconCPX = 0) op1 = gtNewSconNode(EMPTY_STRING_SCON, nullptr); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); #if BIGENDIAN op1 = gtNewIconNode(0, lclTyp); #else op1 = gtNewIconNode(1, lclTyp); #endif goto FIELD_DONE; } break; default: assert(!"Unexpected fieldAccessor"); } if (!isLoadAddress) { if (prefixFlags & PREFIX_VOLATILE) { op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_VOLATILE; } } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_UNALIGNED; } } } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } FIELD_DONE: impPushOnStack(op1, tiRetVal); } break; case CEE_STFLD: case CEE_STSFLD: { bool isStoreStatic = (opcode == CEE_STSFLD); CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type) /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = CORINFO_ACCESS_SET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; typeInfo tiVal; /* Pull the value from the stack */ StackEntry se = impPopStack(); op2 = se.val; tiVal = se.seTypeInfo; clsHnd = tiVal.GetClassHandle(); if (opcode == CEE_STFLD) { tiObj = &impStackTop().seTypeInfo; obj = impPopStack().val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; fieldClsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { /* Is this a 'special' (COM) field? or a TLS ref static field?, field stored int GC heap? or * per-inst static? 
*/ switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER); return; default: break; } } impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static store accesses non-static field if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using stfld on a static field. // We allow it, but need to eval any side-effects for obj if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, op2); goto SPILL_APPEND; case CORINFO_FIELD_STATIC_ADDRESS: case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; default: assert(!"Unexpected fieldAccessor"); } // Create the member assignment, unless we have a TYP_STRUCT. 
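                // Struct-typed stores are not turned into a simple assignment here; they fall
                // through with deferStructAssign set and are handled by impAssignStruct further
                // below, after the required spills.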
bool deferStructAssign = (lclTyp == TYP_STRUCT); if (!deferStructAssign) { if (prefixFlags & PREFIX_VOLATILE) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_IND_UNALIGNED; } /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during importation and reads from the union as if it were a long during code generation. Though this can potentially read garbage, one can get lucky to have this working correctly. This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a dependency on it. To be backward compatible, we will explicitly add an upward cast here so that it works correctly always. Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT for V4.0. */ CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be // generated for ARM as well as x86, so the following IR will be accepted: // STMTx (IL 0x... ???) // * ASG long // +--* CLS_VAR long // \--* CNS_INT int 2 if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) && varTypeIsLong(op1->TypeGet())) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } #endif #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op1' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } op1 = gtNewAssignNode(op1, op2); /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } /* stfld can interfere with value classes (consider the sequence ldloc, ldloca, ..., stfld, stloc). We will be conservative and spill all value class references from the stack. */ if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL))) { assert(tiObj); // If we can resolve the field to be within some local, // then just spill that local. 
// GenTreeLclVarCommon* const lcl = obj->IsLocalAddrExpr(); if (lcl != nullptr) { impSpillLclRefs(lcl->GetLclNum()); } else if (impIsValueType(tiObj)) { impSpillEvalStack(); } else { impSpillValueClasses(); } } /* Spill any refs to the same member from the stack */ impSpillLclRefs((ssize_t)resolvedToken.hField); /* stsfld also interferes with indirect accesses (for aliased statics) and calls. But don't need to spill other statics as we have explicitly spilled this particular static field. */ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD")); if (deferStructAssign) { op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL); } } goto APPEND; case CEE_NEWARR: { /* Get the class type index operand */ _impResolveToken(CORINFO_TOKENKIND_Newarr); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } tiRetVal = verMakeTypeInfo(resolvedToken.hClass); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); /* Form the arglist: array class handle, size */ op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); #ifdef TARGET_64BIT // The array helper takes a native int for array length. // So if we have an int, explicitly extend it to be a native int. if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { if (op2->IsIntegralConst()) { op2->gtType = TYP_I_IMPL; } else { bool isUnsigned = false; op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL); } } #endif // TARGET_64BIT #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF, gtNewCallArgs(op2)); usingReadyToRunHelper = (op1 != nullptr); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the newarr call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub // 3) Allocate the new array // Reason: performance (today, we'll always use the slow helper for the R2R generics case) // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { GenTreeCall::Use* args = gtNewCallArgs(op1, op2); /* Create a call to 'new' */ // Note that this only works for shared generic code because the same helper is used for all // reference array types op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args); } op1->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass; /* Remember that this basic block contains 'new' of an sd array */ block->bbFlags |= BBF_HAS_NEWARRAY; optMethodFlags |= OMF_HAS_NEWARRAY; /* Push the result of the call on the stack */ impPushOnStack(op1, tiRetVal); callTyp = TYP_REF; } break; case CEE_LOCALLOC: // We don't allow locallocs inside handlers if (block->hasHndIndex()) { BADCODE("Localloc can't be inside handler"); } // Get the size to allocate op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); 
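                // For illustration, a fixed-size request such as
                //     ldc.i4   64
                //     localloc
                // (e.g. from a hypothetical C# "stackalloc byte[64]") arrives here with a constant
                // size; the code below can then turn it into an ordinary TYP_BLK local instead of
                // a GT_LCLHEAP when the size is small enough and the block is not in a loop.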
if (verCurrentState.esStackDepth != 0) { BADCODE("Localloc can only be used when the stack is empty"); } // If the localloc is not in a loop and its size is a small constant, // create a new local var of TYP_BLK and return its address. { bool convertedToLocal = false; // Need to aggressively fold here, as even fixed-size locallocs // will have casts in the way. op2 = gtFoldExpr(op2); if (op2->IsIntegralConst()) { const ssize_t allocSize = op2->AsIntCon()->IconValue(); bool bbInALoop = impBlockIsInALoop(block); if (allocSize == 0) { // Result is nullptr JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n"); op1 = gtNewIconNode(0, TYP_I_IMPL); convertedToLocal = true; } else if ((allocSize > 0) && !bbInALoop) { // Get the size threshold for local conversion ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE; #ifdef DEBUG // Optionally allow this to be modified maxSize = JitConfig.JitStackAllocToLocalSize(); #endif // DEBUG if (allocSize <= maxSize) { const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal")); JITDUMP("Converting stackalloc of %zd bytes to new local V%02u\n", allocSize, stackallocAsLocal); lvaTable[stackallocAsLocal].lvType = TYP_BLK; lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize; lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true; op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK); op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1); convertedToLocal = true; if (!this->opts.compDbgEnC) { // Ensure we have stack security for this method. // Reorder layout since the converted localloc is treated as an unsafe buffer. setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; } } } } if (!convertedToLocal) { // Bail out if inlining and the localloc was not converted. // // Note we might consider allowing the inline, if the call // site is not in a loop. if (compIsForInlining()) { InlineObservation obs = op2->IsIntegralConst() ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN; compInlineResult->NoteFatal(obs); return; } op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2); // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd. op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE); // Ensure we have stack security for this method. setNeedsGSSecurityCookie(); /* The FP register may not be back to the original value at the end of the method, even if the frame size is 0, as localloc may have modified it. So we will HAVE to reset it */ compLocallocUsed = true; } else { compLocallocOptimized = true; } } impPushOnStack(op1, tiRetVal); break; case CEE_ISINST: { /* Get the type token */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? 
opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the isinstanceof_any call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Perform the 'is instance' check on the input object // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false, opcodeOffs); } if (compDonotInline()) { return; } impPushOnStack(op1, tiRetVal); } break; } case CEE_REFANYVAL: // get the class handle and make a ICON node out of it _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); // Call helper GETREFANY(classHandle, op1); op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, gtNewCallArgs(op2, op1)); impPushOnStack(op1, tiRetVal); break; case CEE_REFANYTYPE: op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); if (op1->gtOper == GT_OBJ) { // Get the address of the refany op1 = op1->AsOp()->gtOp1; // Fetch the type from the correct slot op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL)); op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1); } else { assertImp(op1->gtOper == GT_MKREFANY); // The pointer may have side-effects if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT) { impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); #ifdef DEBUG impNoteLastILoffs(); #endif } // We already have the class handle op1 = op1->AsOp()->gtOp2; } // convert native TypeHandle to RuntimeTypeHandle { GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT, helperArgs); CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass(); // The handle struct is returned in register op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); op1->AsCall()->gtRetClsHnd = classHandle; #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv()); #endif tiRetVal = typeInfo(TI_STRUCT, classHandle); } impPushOnStack(op1, tiRetVal); break; case CEE_LDTOKEN: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); lastLoadToken = codeAddr; _impResolveToken(CORINFO_TOKENKIND_Ldtoken); tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken); op1 = impTokenToHandle(&resolvedToken, nullptr, true); if (op1 == nullptr) { // compDonotInline() return; } helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE; assert(resolvedToken.hClass != nullptr); if (resolvedToken.hMethod != nullptr) { helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD; } else if (resolvedToken.hField != nullptr) { helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD; } GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs); // The handle struct is returned in register and // it could be 
consumed both as `TYP_STRUCT` and `TYP_REF`. op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv()); #endif op1->AsCall()->gtRetClsHnd = tokenType; tiRetVal = verMakeTypeInfo(tokenType); impPushOnStack(op1, tiRetVal); } break; case CEE_UNBOX: case CEE_UNBOX_ANY: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); bool runtimeLookup; op2 = impTokenToHandle(&resolvedToken, &runtimeLookup); if (op2 == nullptr) { assert(compDonotInline()); return; } // Run this always so we can get access exceptions even with SkipVerification. accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n"); op1 = impPopStack().val; goto CASTCLASS; } /* Pop the object and create the unbox helper call */ /* You might think that for UNBOX_ANY we need to push a different */ /* (non-byref) type, but here we're making the tiRetVal that is used */ /* for the intermediate pointer which we then transfer onto the OBJ */ /* instruction. OBJ then creates the appropriate tiRetVal. */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass); assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE); // Check legality and profitability of inline expansion for unboxing. const bool canExpandInline = (helper == CORINFO_HELP_UNBOX); const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled(); if (canExpandInline && shouldExpandInline) { // See if we know anything about the type of op1, the object being unboxed. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(op1, &isExact, &isNonNull); // We can skip the "exact" bit here as we are comparing to a value class. // compareTypesForEquality should bail on comparisions for shared value classes. if (clsHnd != NO_CLASS_HANDLE) { const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(resolvedToken.hClass, clsHnd); if (compare == TypeCompareState::Must) { JITDUMP("\nOptimizing %s (%s) -- type test will succeed\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", eeGetClassName(clsHnd)); // For UNBOX, null check (if necessary), and then leave the box payload byref on the stack. 
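                        // Sketch of the optimized shapes produced below when the types are known
                        // to match:
                        //     UNBOX     -> COMMA(NULLCHECK(obj), ADD(obj, TARGET_POINTER_SIZE))
                        //     UNBOX.ANY -> OBJ(ADD(obj, TARGET_POINTER_SIZE))
                        // i.e. the helper call is replaced by a direct byref to the box payload,
                        // which starts just past the method table pointer.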
if (opcode == CEE_UNBOX) { GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("optimized unbox clone")); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset); GenTree* nullcheck = gtNewNullCheck(op1, block); GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress); impPushOnStack(result, tiRetVal); break; } // For UNBOX.ANY load the struct from the box payload byref (the load will nullcheck) assert(opcode == CEE_UNBOX_ANY); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, op1, boxPayloadOffset); impPushOnStack(boxPayloadAddress, tiRetVal); oper = GT_OBJ; goto OBJ; } else { JITDUMP("\nUnable to optimize %s -- can't resolve type comparison\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); } } else { JITDUMP("\nUnable to optimize %s -- class for [%06u] not known\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", dspTreeID(op1)); } JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); // we are doing normal unboxing // inline the common case of the unbox helper // UNBOX(exp) morphs into // clone = pop(exp); // ((*clone == typeToken) ? nop : helper(clone, typeToken)); // push(clone + TARGET_POINTER_SIZE) // GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone1")); op1 = gtNewMethodTableLookup(op1); GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2); op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone2")); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = gtNewHelperCallNode(helper, TYP_VOID, gtNewCallArgs(op2, op1)); op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1); op1 = gtNewQmarkNode(TYP_VOID, condBox, op1->AsColon()); // QMARK nodes cannot reside on the evaluation stack. Because there // may be other trees on the evaluation stack that side-effect the // sources of the UNBOX operation we must spill the stack. impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Create the address-expression to reference past the object header // to the beginning of the value-type. Today this means adjusting // past the base of the objects vtable field which is pointer sized. op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2); } else { JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal"); // Don't optimize, just call the helper and be done with it op1 = gtNewHelperCallNode(helper, (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), gtNewCallArgs(op2, op1)); if (op1->gtType == TYP_STRUCT) { op1->AsCall()->gtRetClsHnd = resolvedToken.hClass; } } assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. (helper == CORINFO_HELP_UNBOX_NULLABLE && varTypeIsStruct(op1)) // UnboxNullable helper returns a struct. 
); /* ---------------------------------------------------------------------- | \ helper | | | | \ | | | | \ | CORINFO_HELP_UNBOX | CORINFO_HELP_UNBOX_NULLABLE | | \ | (which returns a BYREF) | (which returns a STRUCT) | | | opcode \ | | | |--------------------------------------------------------------------- | UNBOX | push the BYREF | spill the STRUCT to a local, | | | | push the BYREF to this local | |--------------------------------------------------------------------- | UNBOX_ANY | push a GT_OBJ of | push the STRUCT | | | the BYREF | For Linux when the | | | | struct is returned in two | | | | registers create a temp | | | | which address is passed to | | | | the unbox_nullable helper. | |--------------------------------------------------------------------- */ if (opcode == CEE_UNBOX) { if (helper == CORINFO_HELP_UNBOX_NULLABLE) { // Unbox nullable helper returns a struct type. // We need to spill it to a temp so than can take the address of it. // Here we need unsafe value cls check, since the address of struct is taken to be used // further along and potetially be exploitable. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable")); lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); } assert(op1->gtType == TYP_BYREF); } else { assert(opcode == CEE_UNBOX_ANY); if (helper == CORINFO_HELP_UNBOX) { // Normal unbox helper returns a TYP_BYREF. impPushOnStack(op1, tiRetVal); oper = GT_OBJ; goto OBJ; } assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!"); #if FEATURE_MULTIREG_RET if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed)) { // Unbox nullable helper returns a TYP_STRUCT. // For the multi-reg case we need to spill it to a temp so that // we can pass the address to the unbox_nullable jit helper. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable")); lvaTable[tmp].lvIsMultiRegArg = true; lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); // In this case the return value of the unbox helper is TYP_BYREF. // Make sure the right type is placed on the operand type stack. impPushOnStack(op1, tiRetVal); // Load the struct. oper = GT_OBJ; assert(op1->gtType == TYP_BYREF); goto OBJ; } else #endif // !FEATURE_MULTIREG_RET { // If non register passable struct we have it materialized in the RetBuf. 
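                        // Here op1 is the CORINFO_HELP_UNBOX_NULLABLE call itself, typed
                        // TYP_STRUCT with gtRetClsHnd set, so the unboxed Nullable<T> value can
                        // simply be pushed and consumed like any other struct.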
assert(op1->gtType == TYP_STRUCT); tiRetVal = verMakeTypeInfo(resolvedToken.hClass); assert(tiRetVal.IsValueClass()); } } impPushOnStack(op1, tiRetVal); } break; case CEE_BOX: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Box); JITDUMP(" %08X", resolvedToken.token); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); // Note BOX can be used on things that are not value classes, in which // case we get a NOP. However the verifier's view of the type on the // stack changes (in generic code a 'T' becomes a 'boxed T') if (!eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing BOX(refClass) as NOP\n"); verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal; break; } // Look ahead for box idioms int matched = impBoxPatternMatch(&resolvedToken, codeAddr + sz, codeEndp); if (matched >= 0) { // Skip the matched IL instructions sz += matched; break; } impImportAndPushBox(&resolvedToken); if (compDonotInline()) { return; } } break; case CEE_SIZEOF: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)); impPushOnStack(op1, tiRetVal); break; case CEE_CASTCLASS: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; /* Pop the address and create the 'checked cast' helper call */ // At this point we expect typeRef to contain the token, op1 to contain the value being cast, // and op2 to contain code that creates the type handle corresponding to typeRef CASTCLASS: { GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the chkcastany call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Check the object on the stack for the type-cast // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true, opcodeOffs); } if (compDonotInline()) { return; } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); } } break; case CEE_THROW: // Any block with a throw is rarely executed. 
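            // e.g. a "newobj <some exception ctor>; throw" sequence ends the block here: the popped
            // exception object becomes the single argument of a CORINFO_HELP_THROW helper call, and
            // EVAL_APPEND below evaluates anything left on the stack for side effects only.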
block->bbSetRunRarely(); // Pop the exception object and create the 'throw' helper call op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewCallArgs(impPopStack().val)); // Fall through to clear out the eval stack. EVAL_APPEND: if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); goto APPEND; case CEE_RETHROW: assert(!compIsForInlining()); if (info.compXcptnsCount == 0) { BADCODE("rethrow outside catch"); } /* Create the 'rethrow' helper call */ op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID); goto EVAL_APPEND; case CEE_INITOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = gtNewIconNode(0); // Value op1 = impPopStack().val; // Dest if (eeIsValueClass(resolvedToken.hClass)) { op1 = gtNewStructVal(resolvedToken.hClass, op1); if (op1->OperIs(GT_OBJ)) { gtSetObjGcInfo(op1->AsObj()); } } else { size = info.compCompHnd->getClassSize(resolvedToken.hClass); assert(size == TARGET_POINTER_SIZE); op1 = gtNewBlockVal(op1, size); } op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); goto SPILL_APPEND; case CEE_INITBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Value op1 = impPopStack().val; // Dst addr if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); } else { if (!op2->IsIntegralConst(0)) { op2 = gtNewOperNode(GT_INIT_VAL, TYP_INT, op2); } op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Src addr op1 = impPopStack().val; // Dst addr if (op2->OperGet() == GT_ADDR) { op2 = op2->AsOp()->gtOp1; } else { op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2); } if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, true); } else { op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (!eeIsValueClass(resolvedToken.hClass)) { op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; impPushOnStack(op1, typeInfo()); opcode = CEE_STIND_REF; lclTyp = TYP_REF; goto STIND; } op2 = impPopStack().val; // Src op1 = impPopStack().val; // Dest op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0)); goto SPILL_APPEND; case CEE_STOBJ: { assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; } if (lclTyp == TYP_REF) { opcode = CEE_STIND_REF; goto STIND; } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if 
(impIsPrimitive(jitTyp)) { lclTyp = JITtype2varType(jitTyp); goto STIND; } op2 = impPopStack().val; // Value op1 = impPopStack().val; // Ptr assertImp(varTypeIsStruct(op2)); op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED)) { op1->gtFlags |= GTF_BLK_UNALIGNED; } goto SPILL_APPEND; } case CEE_MKREFANY: assert(!compIsForInlining()); // Being lazy here. Refanys are tricky in terms of gc tracking. // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany. JITDUMP("disabling struct promotion because of mkrefany\n"); fgNoStructPromotion = true; oper = GT_MKREFANY; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken, nullptr, true); if (op2 == nullptr) { // compDonotInline() return; } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec. // But JIT32 allowed it, so we continue to allow it. assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT); // MKREFANY returns a struct. op2 is the class token. op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2); impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass())); break; case CEE_LDOBJ: { oper = GT_OBJ; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); OBJ: tiRetVal = verMakeTypeInfo(resolvedToken.hClass); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; opcode = CEE_LDIND_REF; goto LDIND; } op1 = impPopStack().val; assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL); CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1); // Could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF; assertImp(varTypeIsArithmetic(op1->gtType)); } else { // OBJ returns a struct // and an inline argument which is the class token of the loaded obj op1 = gtNewObjNode(resolvedToken.hClass, op1); } op1->gtFlags |= GTF_EXCEPT; if (prefixFlags & PREFIX_UNALIGNED) { op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; } case CEE_LDLEN: op1 = impPopStack().val; if (opts.OptimizationEnabled()) { /* Use GT_ARR_LENGTH operator so rng check opts see this */ GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length, block); op1 = arrLen; } else { /* Create the expression "*(array_addr + ArrLenOffs)" */ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL)); op1 = gtNewIndir(TYP_INT, op1); } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); break; case CEE_BREAK: op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID); goto SPILL_APPEND; case CEE_NOP: if (opts.compDbgCode) { op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); goto SPILL_APPEND; } break; /******************************** NYI *******************************/ case 0xCC: OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n"); FALLTHROUGH; case CEE_ILLEGAL: case CEE_MACRO_END: default: if (compIsForInlining()) 
{ compInlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); return; } BADCODE3("unknown opcode", ": %02X", (int)opcode); } codeAddr += sz; prevOpcode = opcode; prefixFlags = 0; } return; #undef _impResolveToken } #ifdef _PREFAST_ #pragma warning(pop) #endif // Push a local/argument treeon the operand stack void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal) { tiRetVal.NormaliseForStack(); if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr()) { tiRetVal.SetUninitialisedObjRef(); } impPushOnStack(op, tiRetVal); } //------------------------------------------------------------------------ // impCreateLocal: create a GT_LCL_VAR node to access a local that might need to be normalized on load // // Arguments: // lclNum -- The index into lvaTable // offset -- The offset to associate with the node // // Returns: // The node // GenTreeLclVar* Compiler::impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)) { var_types lclTyp; if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } return gtNewLclvNode(lclNum, lclTyp DEBUGARG(offset)); } // Load a local/argument on the operand stack // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal) { impPushVar(impCreateLocalNode(lclNum DEBUGARG(offset)), tiRetVal); } // Load an argument on the operand stack // Shared by the various CEE_LDARG opcodes // ilArgNum is the argument index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset) { Verify(ilArgNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { if (ilArgNum >= info.compArgsCount) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER); return; } impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo), impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo); } else { if (ilArgNum >= info.compArgsCount) { BADCODE("Bad IL"); } unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } impLoadVar(lclNum, offset); } } // Load a local on the operand stack // Shared by the various CEE_LDLOC opcodes // ilLclNum is the local index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset) { if (compIsForInlining()) { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER); return; } // Get the local type var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo; typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo; /* Have we allocated a temp for this local? 
*/ unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp")); // All vars of inlined methods should be !lvNormalizeOnLoad() assert(!lvaTable[lclNum].lvNormalizeOnLoad()); lclTyp = genActualType(lclTyp); impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal); } else { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { BADCODE("Bad IL"); } unsigned lclNum = info.compArgsCount + ilLclNum; impLoadVar(lclNum, offset); } } #ifdef TARGET_ARM /************************************************************************************** * * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the * dst struct, because struct promotion will turn it into a float/double variable while * the rhs will be an int/long variable. We don't code generate assignment of int into * a float, but there is nothing that might prevent us from doing so. The tree however * would like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int)) * * tmpNum - the lcl dst variable num that is a struct. * src - the src tree assigned to the dest that is a struct/int (when varargs call.) * hClass - the type handle for the struct variable. * * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play, * however, we could do a codegen of transferring from int to float registers * (transfer, not a cast.) * */ void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass) { if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass)) { int hfaSlots = GetHfaCount(hClass); var_types hfaType = GetHfaType(hClass); // If we have varargs we morph the method's return type to be "int" irrespective of its original // type: struct/float at importer because the ABI calls out return in integer registers. // We don't want struct promotion to replace an expression like this: // lclFld_int = callvar_int() into lclFld_float = callvar_int(); // This means an int is getting assigned to a float without a cast. Prevent the promotion. if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) || (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES)) { // Make sure this struct type stays as struct so we can receive the call in a struct. lvaTable[tmpNum].lvIsMultiRegRet = true; } } } #endif // TARGET_ARM #if FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impAssignMultiRegTypeToVar: ensure calls that return structs in multiple // registers return values to suitable temps. // // Arguments: // op -- call returning a struct in registers // hClass -- class handle for struct // // Returns: // Tree with reference to struct local to use as call return value. GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return")); impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL); GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. ret->gtFlags |= GTF_DONT_CSE; assert(IsMultiRegReturnedType(hClass, callConv)); // Mark the var so that fields are not promoted and stay together. 
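    // (Callers then use the returned GT_LCL_VAR in place of the original multi-reg call node.)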
lvaTable[tmpNum].lvIsMultiRegRet = true; return ret; } #endif // FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impReturnInstruction: import a return or an explicit tail call // // Arguments: // prefixFlags -- active IL prefixes // opcode -- [in, out] IL opcode // // Returns: // True if import was successful (may fail for some inlinees) // bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) { const bool isTailCall = (prefixFlags & PREFIX_TAILCALL) != 0; #ifdef DEBUG // If we are importing an inlinee and have GC ref locals we always // need to have a spill temp for the return value. This temp // should have been set up in advance, over in fgFindBasicBlocks. if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID)) { assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM); } #endif // DEBUG GenTree* op2 = nullptr; GenTree* op1 = nullptr; CORINFO_CLASS_HANDLE retClsHnd = nullptr; if (info.compRetType != TYP_VOID) { StackEntry se = impPopStack(); retClsHnd = se.seTypeInfo.GetClassHandle(); op2 = se.val; if (!compIsForInlining()) { impBashVarAddrsToI(op2); op2 = impImplicitIorI4Cast(op2, info.compRetType); op2 = impImplicitR4orR8Cast(op2, info.compRetType); // Note that we allow TYP_I_IMPL<->TYP_BYREF transformation, but only TYP_I_IMPL<-TYP_REF. assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) || ((op2->TypeGet() == TYP_I_IMPL) && TypeIs(info.compRetType, TYP_BYREF)) || (op2->TypeIs(TYP_BYREF, TYP_REF) && (info.compRetType == TYP_I_IMPL)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) || (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType))); #ifdef DEBUG if (!isTailCall && opts.compGcChecks && (info.compRetType == TYP_REF)) { // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with // one-return BB. assert(op2->gtType == TYP_REF); // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTreeCall::Use* args = gtNewCallArgs(op2); op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op2); } } #endif } else { if (verCurrentState.esStackDepth != 0) { assert(compIsForInlining()); JITDUMP("CALLSITE_COMPILATION_ERROR: inlinee's stack is not empty."); compInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); return false; } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (before normalization) =>\n"); gtDispTree(op2); } #endif // Make sure the type matches the original call. var_types returnType = genActualType(op2->gtType); var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType; if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT)) { originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass); } if (returnType != originalCallType) { // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa. // Allow TYP_REF to be returned as TYP_I_IMPL and NOT vice verse. 
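                // Any other return-type mismatch fails the inline (CALLSITE_RETURN_TYPE_MISMATCH)
                // rather than being treated as bad IL.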
if ((TypeIs(returnType, TYP_BYREF, TYP_REF) && (originalCallType == TYP_I_IMPL)) || ((returnType == TYP_I_IMPL) && TypeIs(originalCallType, TYP_BYREF))) { JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); } else { JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH); return false; } } // Below, we are going to set impInlineInfo->retExpr to the tree with the return // expression. At this point, retExpr could already be set if there are multiple // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of // the other blocks already set it. If there is only a single return block, // retExpr shouldn't be set. However, this is not true if we reimport a block // with a return. In that case, retExpr will be set, then the block will be // reimported, but retExpr won't get cleared as part of setting the block to // be reimported. The reimported retExpr value should be the same, so even if // we don't unconditionally overwrite it, it shouldn't matter. if (info.compRetNativeType != TYP_STRUCT) { // compRetNativeType is not TYP_STRUCT. // This implies it could be either a scalar type or SIMD vector type or // a struct type that can be normalized to a scalar type. if (varTypeIsStruct(info.compRetType)) { noway_assert(info.compRetBuffArg == BAD_VAR_NUM); // adjust the type away from struct to integral // and no normalizing op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); } else { // Do we have to normalize? var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType); if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) && fgCastNeeded(op2, fncRealRetType)) { // Small-typed return values are normalized by the callee op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType); } } if (fgNeedReturnSpillTemp()) { assert(info.compRetNativeType != TYP_VOID && (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals())); // If this method returns a ref type, track the actual types seen // in the returns. if (info.compRetType == TYP_REF) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull); if (impInlineInfo->retExpr == nullptr) { // This is the first return, so best known type is the type // of this return value. impInlineInfo->retExprClassHnd = returnClsHnd; impInlineInfo->retExprClassHndIsExact = isExact; } else if (impInlineInfo->retExprClassHnd != returnClsHnd) { // This return site type differs from earlier seen sites, // so reset the info and we'll fall back to using the method's // declared return type for the return spill temp. impInlineInfo->retExprClassHnd = nullptr; impInlineInfo->retExprClassHndIsExact = false; } } impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); op2 = tmpOp2; #ifdef DEBUG if (impInlineInfo->retExpr) { // Some other block(s) have seen the CEE_RET first. // Better they spilled to the same temp. 
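                        // (The asserts below verify that the existing retExpr is a GT_LCL_VAR
                        // referring to that same spill temp.)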
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR); assert(impInlineInfo->retExpr->AsLclVarCommon()->GetLclNum() == op2->AsLclVarCommon()->GetLclNum()); } #endif } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (after normalization) =>\n"); gtDispTree(op2); } #endif // Report the return expression impInlineInfo->retExpr = op2; } else { // compRetNativeType is TYP_STRUCT. // This implies that struct return via RetBuf arg or multi-reg struct return GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall(); // Assign the inlinee return into a spill temp. // spill temp only exists if there are multiple return points if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) { // in this case we have to insert multiple struct copies to the temp // and the retexpr is just the temp. assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); } #if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) #if defined(TARGET_ARM) // TODO-ARM64-NYI: HFA // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the // next ifdefs could be refactored in a single method with the ifdef inside. if (IsHfa(retClsHnd)) { // Same as !IsHfa but just don't bother with impAssignStructPtr. #else // defined(UNIX_AMD64_ABI) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { // If single eightbyte, the return type would have been normalized and there won't be a temp var. // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes - // max allowed.) assert(retRegCount == MAX_RET_REG_COUNT); // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr. CLANG_FORMAT_COMMENT_ANCHOR; #endif // defined(UNIX_AMD64_ABI) if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { #if defined(TARGET_ARM) impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType); #else // defined(UNIX_AMD64_ABI) // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); #endif // defined(UNIX_AMD64_ABI) } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_ARM64) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount >= 2); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_X86) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount == MAX_RET_REG_COUNT); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. 
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #endif // defined(TARGET_ARM64) { assert(iciCall->HasRetBufArg()); GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode()); // spill temp only exists if there are multiple return points if (fgNeedReturnSpillTemp()) { // if this is the first return we have seen set the retExpr if (!impInlineInfo->retExpr) { impInlineInfo->retExpr = impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), retClsHnd, (unsigned)CHECK_SPILL_ALL); } } else { impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); } } } if (impInlineInfo->retExpr != nullptr) { impInlineInfo->retBB = compCurBB; } } } if (compIsForInlining()) { return true; } if (info.compRetType == TYP_VOID) { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } else if (info.compRetBuffArg != BAD_VAR_NUM) { // Assign value to return buff (first param) GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset())); op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX). CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // x64 (System V and Win64) calling convention requires to // return the implicit return buffer explicitly (in RAX). // Change the return type to be BYREF. op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); #else // !defined(TARGET_AMD64) // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX). // In such case the return value of the function is changed to BYREF. // If profiler hook is not needed the return type of the function is TYP_VOID. if (compIsProfilerHookNeeded()) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #if defined(TARGET_ARM64) // On ARM64, the native instance calling convention variant // requires the implicit ByRef to be explicitly returned. else if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif #if defined(TARGET_X86) else if (info.compCallConv != CorInfoCallConvExtension::Managed) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif else { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } #endif // !defined(TARGET_AMD64) } else if (varTypeIsStruct(info.compRetType)) { #if !FEATURE_MULTIREG_RET // For both ARM architectures the HFA native types are maintained as structs. // Also on System V AMD64 the multireg structs returns are also left as structs. 
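        // With multi-reg returns disabled, any struct return must already have been
        // normalized to a primitive type by this point.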
        noway_assert(info.compRetNativeType != TYP_STRUCT);
#endif

        op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv);

        // return op2
        var_types returnType = info.compRetType;
        op1 = gtNewOperNode(GT_RETURN, genActualType(returnType), op2);
    }
    else
    {
        // return op2
        op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
    }

    // We must have imported a tailcall and jumped to RET
    if (isTailCall)
    {
        assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));

        opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES

        // impImportCall() would have already appended TYP_VOID calls
        if (info.compRetType == TYP_VOID)
        {
            return true;
        }
    }

    impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI);
#ifdef DEBUG
    // Remember at which BC offset the tree was finished
    impNoteLastILoffs();
#endif
    return true;
}

/*****************************************************************************
 * Mark the block as unimported.
 * Note that the caller is responsible for calling impImportBlockPending(),
 * with the appropriate stack-state
 */

inline void Compiler::impReimportMarkBlock(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose && (block->bbFlags & BBF_IMPORTED))
    {
        printf("\n" FMT_BB " will be reimported\n", block->bbNum);
    }
#endif

    block->bbFlags &= ~BBF_IMPORTED;
}

/*****************************************************************************
 * Mark the successors of the given block as unimported.
 * Note that the caller is responsible for calling impImportBlockPending()
 * for all the successors, with the appropriate stack-state.
 */

void Compiler::impReimportMarkSuccessors(BasicBlock* block)
{
    for (BasicBlock* const succBlock : block->Succs())
    {
        impReimportMarkBlock(succBlock);
    }
}

/*****************************************************************************
 *
 * Filter wrapper to handle only the passed-in exception code.
 */

LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
{
    if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
    {
        return EXCEPTION_EXECUTE_HANDLER;
    }

    return EXCEPTION_CONTINUE_SEARCH;
}

void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
{
    assert(block->hasTryIndex());
    assert(!compIsForInlining());

    unsigned  tryIndex = block->getTryIndex();
    EHblkDsc* HBtab    = ehGetDsc(tryIndex);

    if (isTryStart)
    {
        assert(block->bbFlags & BBF_TRY_BEG);

        // The Stack must be empty
        //
        if (block->bbStkDepth != 0)
        {
            BADCODE("Evaluation stack must be empty on entry into a try block");
        }
    }

    // Save the stack contents, we'll need to restore it later
    //
    SavedStack blockState;
    impSaveStackState(&blockState, false);

    while (HBtab != nullptr)
    {
        if (isTryStart)
        {
            // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
            // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
            //
            if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
            {
                // We trigger an invalid program exception here unless we have a try/fault region.
                //
                if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
                {
                    BADCODE(
                        "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
                }
                else
                {
                    // Allow a try/fault region to proceed.
                    assert(HBtab->HasFaultHandler());
                }
            }
        }

        // Recursively process the handler block, if we haven't already done so.
        BasicBlock* hndBegBB = HBtab->ebdHndBeg;
        if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0))
        {
            // Construct the proper verification stack state
            // either empty or one that contains just
            // the Exception Object that we are dealing with
            //
            verCurrentState.esStackDepth = 0;

            if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
            {
                CORINFO_CLASS_HANDLE clsHnd;

                if (HBtab->HasFilter())
                {
                    clsHnd = impGetObjectClass();
                }
                else
                {
                    CORINFO_RESOLVED_TOKEN resolvedToken;
                    resolvedToken.tokenContext = impTokenLookupContextHandle;
                    resolvedToken.tokenScope   = info.compScopeHnd;
                    resolvedToken.token        = HBtab->ebdTyp;
                    resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
                    info.compCompHnd->resolveToken(&resolvedToken);
                    clsHnd = resolvedToken.hClass;
                }

                // push the catch arg on the stack, spilling to a temp if necessary
                // Note: can update HBtab->ebdHndBeg!
                hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
            }

            // Queue up the handler for importing
            //
            impImportBlockPending(hndBegBB);
        }

        // Process the filter block, if we haven't already done so.
        if (HBtab->HasFilter())
        {
            /* @VERIFICATION : Ideally the end of filter state should get
               propagated to the catch handler, this is an incompleteness,
               but is not a security/compliance issue, since the only
               interesting state is the 'thisInit' state.
            */
            BasicBlock* filterBB = HBtab->ebdFilter;
            if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0))
            {
                verCurrentState.esStackDepth = 0;

                // push the catch arg on the stack, spilling to a temp if necessary
                // Note: can update HBtab->ebdFilter!
                const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
                filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);

                impImportBlockPending(filterBB);
            }
        }

        // This seems redundant ....??
        if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
        {
            /* Recursively process the handler block */

            verCurrentState.esStackDepth = 0;

            // Queue up the fault handler for importing
            //
            impImportBlockPending(HBtab->ebdHndBeg);
        }

        // Now process our enclosing try index (if any)
        //
        tryIndex = HBtab->ebdEnclosingTryIndex;
        if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
        {
            HBtab = nullptr;
        }
        else
        {
            HBtab = ehGetDsc(tryIndex);
        }
    }

    // Restore the stack contents
    impRestoreStackState(&blockState);
}

//***************************************************************
// Import the instructions for the given basic block.  Perform
// verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
// time, or whose verification pre-state is changed.

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
void Compiler::impImportBlock(BasicBlock* block)
{
    // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
    // handle them specially. In particular, there is no IL to import for them, but we do need
    // to mark them as imported and put their successors on the pending import list.
    if (block->bbFlags & BBF_INTERNAL)
    {
        JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum);
        block->bbFlags |= BBF_IMPORTED;

        for (BasicBlock* const succBlock : block->Succs())
        {
            impImportBlockPending(succBlock);
        }

        return;
    }

    bool markImport;

    assert(block);

    /* Make the block globally available */

    compCurBB = block;

#ifdef DEBUG
    /* Initialize the debug variables */
    impCurOpcName = "unknown";
    impCurOpcOffs = block->bbCodeOffs;
#endif

    /* Set the current stack state to the merged result */
    verResetCurrentState(block, &verCurrentState);

    /* Now walk the code and import the IL into GenTrees */

    struct FilterVerificationExceptionsParam
    {
        Compiler*   pThis;
        BasicBlock* block;
    };
    FilterVerificationExceptionsParam param;

    param.pThis = this;
    param.block = block;

    PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
    {
        /* @VERIFICATION : For now, the only state propagation from try
           to its handler is "thisInit" state (stack is empty at start of try).
           In general, for state that we track in verification, we need to
           model the possibility that an exception might happen at any IL
           instruction, so we really need to merge all states that obtain
           between IL instructions in a try block into the start states of
           all handlers.

           However we do not allow the 'this' pointer to be uninitialized when
           entering most kinds of try regions (only try/fault are allowed to
           have an uninitialized this pointer on entry to the try)

           Fortunately, the stack is thrown away when an exception leads to a
           handler, so we don't have to worry about that.  We DO, however, have
           to worry about the "thisInit" state.  But only for the try/fault case.

           The only allowed transition is from TIS_Uninit to TIS_Init.

           So for a try/fault region for the fault handler block we will merge
           the start state of the try begin and the post-state of each block
           that is part of this try region
        */

        // merge the start state of the try begin
        //
        if (pParam->block->bbFlags & BBF_TRY_BEG)
        {
            pParam->pThis->impVerifyEHBlock(pParam->block, true);
        }

        pParam->pThis->impImportBlockCode(pParam->block);

        // As discussed above:
        // merge the post-state of each block that is part of this try region
        //
        if (pParam->block->hasTryIndex())
        {
            pParam->pThis->impVerifyEHBlock(pParam->block, false);
        }
    }
    PAL_EXCEPT_FILTER(FilterVerificationExceptions)
    {
        verHandleVerificationFailure(block DEBUGARG(false));
    }
    PAL_ENDTRY

    if (compDonotInline())
    {
        return;
    }

    assert(!compDonotInline());

    markImport = false;

SPILLSTACK:

    unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
    bool        reimportSpillClique = false;
    BasicBlock* tgtBlock            = nullptr;

    /* If the stack is non-empty, we might have to spill its contents */

    if (verCurrentState.esStackDepth != 0)
    {
        impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
                                  // on the stack, its lifetime is hard to determine, simply
                                  // don't reuse such temps.

        Statement* addStmt = nullptr;

        /* Do the successors of 'block' have any other predecessors ?
           We do not want to do some of the optimizations related to multiRef
           if we can reimport blocks */

        unsigned multRef = impCanReimport ? unsigned(~0) : 0;

        switch (block->bbJumpKind)
        {
            case BBJ_COND:

                addStmt = impExtractLastStmt();

                assert(addStmt->GetRootNode()->gtOper == GT_JTRUE);

                /* Note if the next block has more than one ancestor */

                multRef |= block->bbNext->bbRefs;

                /* Does the next block have temps assigned?
*/ baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; if (baseTmp != NO_BASE_TMP) { break; } /* Try the target of the jump then */ multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_ALWAYS: multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_NONE: multRef |= block->bbNext->bbRefs; baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; break; case BBJ_SWITCH: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_SWITCH); for (BasicBlock* const tgtBlock : block->SwitchTargets()) { multRef |= tgtBlock->bbRefs; // Thanks to spill cliques, we should have assigned all or none assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn)); baseTmp = tgtBlock->bbStkTempsIn; if (multRef > 1) { break; } } break; case BBJ_CALLFINALLY: case BBJ_EHCATCHRET: case BBJ_RETURN: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_THROW: NO_WAY("can't have 'unreached' end of BB with non-empty stack"); break; default: noway_assert(!"Unexpected bbJumpKind"); break; } assert(multRef >= 1); /* Do we have a base temp number? */ bool newTemps = (baseTmp == NO_BASE_TMP); if (newTemps) { /* Grab enough temps for the whole stack */ baseTmp = impGetSpillTmpBase(block); } /* Spill all stack entries into temps */ unsigned level, tempNum; JITDUMP("\nSpilling stack entries into temps\n"); for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++) { GenTree* tree = verCurrentState.esStack[level].val; /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from the other. This should merge to a byref in unverifiable code. However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the successor would be imported assuming there was a TYP_I_IMPL on the stack. Thus the value would not get GC-tracked. Hence, change the temp to TYP_BYREF and reimport the successors. Note: We should only allow this in unverifiable code. */ if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL) { lvaTable[tempNum].lvType = TYP_BYREF; impReimportMarkSuccessors(block); markImport = true; } #ifdef TARGET_64BIT if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "native int". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_I_IMPL; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL) { // Spill clique has decided this should be "native int", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } // Consider the case where one branch left a 'byref' on the stack and the other leaves // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64 // behavior instead of asserting and then generating bad code (where we save/restore the // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been // imported already, we need to change the type of the local and reimport the spill clique. 
// If the 'byref' side has imported, we insert a cast from int to 'native int' to match // the 'byref' size. if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "byref". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_BYREF; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF) { // Spill clique has decided this should be "byref", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique size. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } #endif // TARGET_64BIT if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT) { // Some other block in the spill clique set this to "float", but now we have "double". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_DOUBLE; reimportSpillClique = true; } else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE) { // Spill clique has decided this should be "double", but this block only pushes a "float". // Insert a cast to "double" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE); } /* If addStmt has a reference to tempNum (can only happen if we are spilling to the temps already used by a previous block), we need to spill addStmt */ if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum)) { GenTree* addTree = addStmt->GetRootNode(); if (addTree->gtOper == GT_JTRUE) { GenTree* relOp = addTree->AsOp()->gtOp1; assert(relOp->OperIsCompare()); var_types type = genActualType(relOp->AsOp()->gtOp1->TypeGet()); if (gtHasRef(relOp->AsOp()->gtOp1, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1")); impAssignTempGen(temp, relOp->AsOp()->gtOp1, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type); } if (gtHasRef(relOp->AsOp()->gtOp2, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2")); impAssignTempGen(temp, relOp->AsOp()->gtOp2, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type); } } else { assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet())); unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH")); impAssignTempGen(temp, addTree->AsOp()->gtOp1, level); addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet())); } } /* Spill the stack entry, and replace with the temp */ if (!impSpillStackEntry(level, tempNum #ifdef DEBUG , true, "Spill Stack Entry" #endif )) { if (markImport) { BADCODE("bad stack state"); } // Oops. Something went wrong when spilling. Bad code. 
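                // Mark the block as failing verification and restart the spill handling from SPILLSTACK.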
verHandleVerificationFailure(block DEBUGARG(true)); goto SPILLSTACK; } } /* Put back the 'jtrue'/'switch' if we removed it earlier */ if (addStmt != nullptr) { impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE); } } // Some of the append/spill logic works on compCurBB assert(compCurBB == block); /* Save the tree list in the block */ impEndTreeList(block); // impEndTreeList sets BBF_IMPORTED on the block // We do *NOT* want to set it later than this because // impReimportSpillClique might clear it if this block is both a // predecessor and successor in the current spill clique assert(block->bbFlags & BBF_IMPORTED); // If we had a int/native int, or float/double collision, we need to re-import if (reimportSpillClique) { // This will re-import all the successors of block (as well as each of their predecessors) impReimportSpillClique(block); // For blocks that haven't been imported yet, we still need to mark them as pending import. for (BasicBlock* const succ : block->Succs()) { if ((succ->bbFlags & BBF_IMPORTED) == 0) { impImportBlockPending(succ); } } } else // the normal case { // otherwise just import the successors of block /* Does this block jump to any other blocks? */ for (BasicBlock* const succ : block->Succs()) { impImportBlockPending(succ); } } } #ifdef _PREFAST_ #pragma warning(pop) #endif /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Merges the current verification state into the verification state of "block" // (its "pre-state"). void Compiler::impImportBlockPending(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum); } #endif // We will add a block to the pending set if it has not already been imported (or needs to be re-imported), // or if it has, but merging in a predecessor's post-state changes the block's pre-state. // (When we're doing verification, we always attempt the merge to detect verification errors.) // If the block has not been imported, add to pending set. bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0); // Initialize bbEntryState just the first time we try to add this block to the pending list // Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set // We use NULL to indicate the 'common' state to avoid memory allocation if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) && (impGetPendingBlockMember(block) == 0)) { verInitBBEntryState(block, &verCurrentState); assert(block->bbStkDepth == 0); block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth); assert(addToPending); assert(impGetPendingBlockMember(block) == 0); } else { // The stack should have the same height on entry to the block from all its predecessors. 
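        // A depth mismatch here means the incoming IL is inconsistent, so we bail out via NO_WAY below.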
if (block->bbStkDepth != verCurrentState.esStackDepth) { #ifdef DEBUG char buffer[400]; sprintf_s(buffer, sizeof(buffer), "Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n" "Previous depth was %d, current depth is %d", block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth, verCurrentState.esStackDepth); buffer[400 - 1] = 0; NO_WAY(buffer); #else NO_WAY("Block entered with different stack depths"); #endif } if (!addToPending) { return; } if (block->bbStkDepth > 0) { // We need to fix the types of any spill temps that might have changed: // int->native int, float->double, int->byref, etc. impRetypeEntryStateTemps(block); } // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_Unknown) PendingDsc; } dsc->pdBB = block; dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth; dsc->pdThisPtrInit = verCurrentState.thisInitialized; // Save the stack trees for later if (verCurrentState.esStackDepth) { impSaveStackState(&dsc->pdSavedStack, false); } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us to now to consider the block as not imported (at least for // the final time...) block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block. void Compiler::impReimportBlockPending(BasicBlock* block) { JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum); assert(block->bbFlags & BBF_IMPORTED); // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_ImpStack) PendingDsc; } dsc->pdBB = block; if (block->bbEntryState) { dsc->pdThisPtrInit = block->bbEntryState->thisInitialized; dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth; dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack; } else { dsc->pdThisPtrInit = TIS_Bottom; dsc->pdSavedStack.ssDepth = 0; dsc->pdSavedStack.ssTrees = nullptr; } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us to now to consider the block as not imported (at least for // the final time...) 
    block->bbFlags &= ~BBF_IMPORTED;

#ifdef DEBUG
    if (verbose && 0)
    {
        printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
    }
#endif
}

void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
{
    if (comp->impBlockListNodeFreeList == nullptr)
    {
        return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1);
    }
    else
    {
        BlockListNode* res             = comp->impBlockListNodeFreeList;
        comp->impBlockListNodeFreeList = res->m_next;
        return res;
    }
}

void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
{
    node->m_next             = impBlockListNodeFreeList;
    impBlockListNodeFreeList = node;
}

void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
{
    bool toDo = true;

    noway_assert(!fgComputePredsDone);
    if (!fgCheapPredsValid)
    {
        fgComputeCheapPreds();
    }

    BlockListNode* succCliqueToDo = nullptr;
    BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
    while (toDo)
    {
        toDo = false;
        // Look at the successors of every member of the predecessor to-do list.
        while (predCliqueToDo != nullptr)
        {
            BlockListNode* node = predCliqueToDo;
            predCliqueToDo      = node->m_next;
            BasicBlock* blk     = node->m_blk;
            FreeBlockListNode(node);

            for (BasicBlock* const succ : blk->Succs())
            {
                // If it's not already in the clique, add it, and also add it
                // as a member of the successor "toDo" set.
                if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
                {
                    callback->Visit(SpillCliqueSucc, succ);
                    impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
                    succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
                    toDo           = true;
                }
            }
        }
        // Look at the predecessors of every member of the successor to-do list.
        while (succCliqueToDo != nullptr)
        {
            BlockListNode* node = succCliqueToDo;
            succCliqueToDo      = node->m_next;
            BasicBlock* blk     = node->m_blk;
            FreeBlockListNode(node);

            for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
            {
                BasicBlock* predBlock = pred->block;

                // If it's not already in the clique, add it, and also add it
                // as a member of the predecessor "toDo" set.
                if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
                {
                    callback->Visit(SpillCliquePred, predBlock);
                    impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
                    predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
                    toDo           = true;
                }
            }
        }
    }

    // If this fails, it means we didn't walk the spill clique properly and somehow managed
    // to miss walking back to include the predecessor we started from.
    // The most likely cause: missing or out-of-date bbPreds
    assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
}

void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    if (predOrSucc == SpillCliqueSucc)
    {
        assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
        blk->bbStkTempsIn = m_baseTmp;
    }
    else
    {
        assert(predOrSucc == SpillCliquePred);
        assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
        blk->bbStkTempsOut = m_baseTmp;
    }
}

void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    // For Preds we could be a little smarter and just find the existing store
    // and re-type it/add a cast, but that is complicated and hopefully very rare, so
    // just re-import the whole block (just like we do for successors)

    if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
    {
        // If we haven't imported this block and we're not going to (because it isn't on
        // the pending list) then just ignore it for now.
// This block has either never been imported (EntryState == NULL) or it failed // verification. Neither state requires us to force it to be imported now. assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION)); return; } // For successors we have a valid verCurrentState, so just mark them for reimport // the 'normal' way // Unlike predecessors, we *DO* need to reimport the current block because the // initial import had the wrong entry state types. // Similarly, blocks that are currently on the pending list, still need to call // impImportBlockPending to fixup their entry state. if (predOrSucc == SpillCliqueSucc) { m_pComp->impReimportMarkBlock(blk); // Set the current stack state to that of the blk->bbEntryState m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState); assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry()); m_pComp->impImportBlockPending(blk); } else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0)) { // As described above, we are only visiting predecessors so they can // add the appropriate casts, since we have already done that for the current // block, it does not need to be reimported. // Nor do we need to reimport blocks that are still pending, but not yet // imported. // // For predecessors, we have no state to seed the EntryState, so we just have // to assume the existing one is correct. // If the block is also a successor, it will get the EntryState properly // updated when it is visited as a successor in the above "if" block. assert(predOrSucc == SpillCliquePred); m_pComp->impReimportBlockPending(blk); } } // Re-type the incoming lclVar nodes to match the varDsc. void Compiler::impRetypeEntryStateTemps(BasicBlock* blk) { if (blk->bbEntryState != nullptr) { EntryState* es = blk->bbEntryState; for (unsigned level = 0; level < es->esStackDepth; level++) { GenTree* tree = es->esStack[level].val; if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD)) { es->esStack[level].val->gtType = lvaGetDesc(tree->AsLclVarCommon())->TypeGet(); } } } } unsigned Compiler::impGetSpillTmpBase(BasicBlock* block) { if (block->bbStkTempsOut != NO_BASE_TMP) { return block->bbStkTempsOut; } #ifdef DEBUG if (verbose) { printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // Otherwise, choose one, and propagate to all members of the spill clique. // Grab enough temps for the whole stack. unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); SetSpillTempsBase callback(baseTmp); // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor // to one spill clique, and similarly can only be the successor to one spill clique impWalkSpillCliqueFromPred(block, &callback); return baseTmp; } void Compiler::impReimportSpillClique(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // If we get here, it is because this block is already part of a spill clique // and one predecessor had an outgoing live stack slot of type int, and this // block has an outgoing live stack slot of type native int. // We need to reset these before traversal because they have already been set // by the previous walk to determine all the members of the spill clique. 
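    // Note that the membership sets live on the inline root, so they are shared by all inlinee compilers.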
impInlineRoot()->impSpillCliquePredMembers.Reset(); impInlineRoot()->impSpillCliqueSuccMembers.Reset(); ReimportSpillClique callback(this); impWalkSpillCliqueFromPred(block, &callback); } // Set the pre-state of "block" (which should not have a pre-state allocated) to // a copy of "srcState", cloning tree pointers as required. void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState) { if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom) { block->bbEntryState = nullptr; return; } block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1); // block->bbEntryState.esRefcount = 1; block->bbEntryState->esStackDepth = srcState->esStackDepth; block->bbEntryState->thisInitialized = TIS_Bottom; if (srcState->esStackDepth > 0) { block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]); unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry); memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize); for (unsigned level = 0; level < srcState->esStackDepth; level++) { GenTree* tree = srcState->esStack[level].val; block->bbEntryState->esStack[level].val = gtCloneExpr(tree); } } if (verTrackObjCtorInitState) { verSetThisInit(block, srcState->thisInitialized); } return; } void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis) { assert(tis != TIS_Bottom); // Precondition. if (block->bbEntryState == nullptr) { block->bbEntryState = new (this, CMK_Unknown) EntryState(); } block->bbEntryState->thisInitialized = tis; } /* * Resets the current state to the state at the start of the basic block */ void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState) { if (block->bbEntryState == nullptr) { destState->esStackDepth = 0; destState->thisInitialized = TIS_Bottom; return; } destState->esStackDepth = block->bbEntryState->esStackDepth; if (destState->esStackDepth > 0) { unsigned stackSize = destState->esStackDepth * sizeof(StackEntry); memcpy(destState->esStack, block->bbStackOnEntry(), stackSize); } destState->thisInitialized = block->bbThisOnEntry(); return; } ThisInitState BasicBlock::bbThisOnEntry() const { return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom; } unsigned BasicBlock::bbStackDepthOnEntry() const { return (bbEntryState ? 
bbEntryState->esStackDepth : 0); } void BasicBlock::bbSetStack(void* stackBuffer) { assert(bbEntryState); assert(stackBuffer); bbEntryState->esStack = (StackEntry*)stackBuffer; } StackEntry* BasicBlock::bbStackOnEntry() const { assert(bbEntryState); return bbEntryState->esStack; } void Compiler::verInitCurrentState() { verTrackObjCtorInitState = false; verCurrentState.thisInitialized = TIS_Bottom; // initialize stack info verCurrentState.esStackDepth = 0; assert(verCurrentState.esStack != nullptr); // copy current state to entry state of first BB verInitBBEntryState(fgFirstBB, &verCurrentState); } Compiler* Compiler::impInlineRoot() { if (impInlineInfo == nullptr) { return this; } else { return impInlineInfo->InlineRoot; } } BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliquePred) { return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd()); } else { assert(predOrSucc == SpillCliqueSucc); return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd()); } } void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val) { if (predOrSucc == SpillCliquePred) { impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val); } else { assert(predOrSucc == SpillCliqueSucc); impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val); } } /***************************************************************************** * * Convert the instrs ("import") into our internal format (trees). The * basic flowgraph has already been constructed and is passed in. */ void Compiler::impImport() { #ifdef DEBUG if (verbose) { printf("*************** In impImport() for %s\n", info.compFullName); } #endif Compiler* inlineRoot = impInlineRoot(); if (info.compMaxStack <= SMALL_STACK_SIZE) { impStkSize = SMALL_STACK_SIZE; } else { impStkSize = info.compMaxStack; } if (this == inlineRoot) { // Allocate the stack contents verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } else { // This is the inlinee compiler, steal the stack from the inliner compiler // (after ensuring that it is large enough). if (inlineRoot->impStkSize < impStkSize) { inlineRoot->impStkSize = impStkSize; inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } verCurrentState.esStack = inlineRoot->verCurrentState.esStack; } // initialize the entry state at start of method verInitCurrentState(); // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase). if (this == inlineRoot) // These are only used on the root of the inlining tree. { // We have initialized these previously, but to size 0. Make them larger. impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2); } inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2); impBlockListNodeFreeList = nullptr; #ifdef DEBUG impLastILoffsStmt = nullptr; impNestedStackSpill = false; #endif impBoxTemp = BAD_VAR_NUM; impPendingList = impPendingFree = nullptr; // Skip leading internal blocks. // These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects. 
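    // The loop below marks each such internal block as imported and follows bbNext (or the OSR
    // entry jump) until it reaches a block with real IL to import.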
// BasicBlock* entryBlock = fgFirstBB; while (entryBlock->bbFlags & BBF_INTERNAL) { JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; if (entryBlock->bbJumpKind == BBJ_NONE) { entryBlock = entryBlock->bbNext; } else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } else { assert(!"unexpected bbJumpKind in entry sequence"); } } // Note for OSR we'd like to be able to verify this block must be // stack empty, but won't know that until we've imported...so instead // we'll BADCODE out if we mess up. // // (the concern here is that the runtime asks us to OSR a // different IL version than the one that matched the method that // triggered OSR). This should not happen but I might have the // IL versioning stuff wrong. // // TODO: we also currently expect this block to be a join point, // which we should verify over when we find jump targets. impImportBlockPending(entryBlock); /* Import blocks in the worker-list until there are no more */ while (impPendingList) { /* Remove the entry at the front of the list */ PendingDsc* dsc = impPendingList; impPendingList = impPendingList->pdNext; impSetPendingBlockMember(dsc->pdBB, 0); /* Restore the stack state */ verCurrentState.thisInitialized = dsc->pdThisPtrInit; verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth; if (verCurrentState.esStackDepth) { impRestoreStackState(&dsc->pdSavedStack); } /* Add the entry to the free list for reuse */ dsc->pdNext = impPendingFree; impPendingFree = dsc; /* Now import the block */ if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION) { verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true)); impEndTreeList(dsc->pdBB); } else { impImportBlock(dsc->pdBB); if (compDonotInline()) { return; } if (compIsForImportOnly()) { return; } } } #ifdef DEBUG if (verbose && info.compXcptnsCount) { printf("\nAfter impImport() added block for try,catch,finally"); fgDispBasicBlocks(); printf("\n"); } // Used in impImportBlockPending() for STRESS_CHK_REIMPORT for (BasicBlock* const block : Blocks()) { block->bbFlags &= ~BBF_VISITED; } #endif } // Checks if a typeinfo (usually stored in the type stack) is a struct. // The invariant here is that if it's not a ref or a method and has a class handle // it's a valuetype bool Compiler::impIsValueType(typeInfo* pTypeInfo) { if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd()) { return true; } else { return false; } } /***************************************************************************** * Check to see if the tree is the address of a local or the address of a field in a local. *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns true. */ bool Compiler::impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut) { if (tree->gtOper != GT_ADDR) { return false; } GenTree* op = tree->AsOp()->gtOp1; while (op->gtOper == GT_FIELD) { op = op->AsField()->GetFldObj(); if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL. 
{ op = op->AsOp()->gtOp1; } else { return false; } } if (op->gtOper == GT_LCL_VAR) { if (lclVarTreeOut != nullptr) { *lclVarTreeOut = op; } return true; } else { return false; } } //------------------------------------------------------------------------ // impMakeDiscretionaryInlineObservations: make observations that help // determine the profitability of a discretionary inline // // Arguments: // pInlineInfo -- InlineInfo for the inline, or null for the prejit root // inlineResult -- InlineResult accumulating information about this inline // // Notes: // If inlining or prejitting the root, this method also makes // various observations about the method that factor into inline // decisions. It sets `compNativeSizeEstimate` as a side effect. void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult) { assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining. (pInlineInfo == nullptr && !compIsForInlining()) // Calculate the static inlining hint for ngen. ); // If we're really inlining, we should just have one result in play. assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult)); // If this is a "forceinline" method, the JIT probably shouldn't have gone // to the trouble of estimating the native code size. Even if it did, it // shouldn't be relying on the result of this method. assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE); // Note if the caller contains NEWOBJ or NEWARR. Compiler* rootCompiler = impInlineRoot(); if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY); } if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ); } bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0; if (isSpecialMethod) { if (calleeIsStatic) { inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR); } else { inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR); } } else if (!calleeIsStatic) { // Callee is an instance method. // // Check if the callee has the same 'this' as the root. if (pInlineInfo != nullptr) { GenTree* thisArg = pInlineInfo->iciCall->AsCall()->gtCallThisArg->GetNode(); assert(thisArg); bool isSameThis = impIsThis(thisArg); inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis); } } bool callsiteIsGeneric = (rootCompiler->info.compMethodInfo->args.sigInst.methInstCount != 0) || (rootCompiler->info.compMethodInfo->args.sigInst.classInstCount != 0); bool calleeIsGeneric = (info.compMethodInfo->args.sigInst.methInstCount != 0) || (info.compMethodInfo->args.sigInst.classInstCount != 0); if (!callsiteIsGeneric && calleeIsGeneric) { inlineResult->Note(InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC); } // Inspect callee's arguments (and the actual values at the callsite for them) CORINFO_SIG_INFO sig = info.compMethodInfo->args; CORINFO_ARG_LIST_HANDLE sigArg = sig.args; GenTreeCall::Use* argUse = pInlineInfo == nullptr ? nullptr : pInlineInfo->iciCall->AsCall()->gtCallArgs; for (unsigned i = 0; i < info.compMethodInfo->args.numArgs; i++) { CORINFO_CLASS_HANDLE sigClass; CorInfoType corType = strip(info.compCompHnd->getArgType(&sig, sigArg, &sigClass)); GenTree* argNode = argUse == nullptr ? 
nullptr : argUse->GetNode()->gtSkipPutArgType(); if (corType == CORINFO_TYPE_CLASS) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); } else if (corType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT); } else if (corType == CORINFO_TYPE_BYREF) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); corType = info.compCompHnd->getChildType(sigClass, &sigClass); } if (argNode != nullptr) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE argCls = gtGetClassHandle(argNode, &isExact, &isNonNull); if (argCls != nullptr) { const bool isArgValueType = eeIsValueClass(argCls); // Exact class of the arg is known if (isExact && !isArgValueType) { inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS); if ((argCls != sigClass) && (sigClass != nullptr)) { // .. but the signature accepts a less concrete type. inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT); } } // Arg is a reference type in the signature and a boxed value type was passed. else if (isArgValueType && (corType == CORINFO_TYPE_CLASS)) { inlineResult->Note(InlineObservation::CALLSITE_ARG_BOXED); } } if (argNode->OperIsConst()) { inlineResult->Note(InlineObservation::CALLSITE_ARG_CONST); } argUse = argUse->GetNext(); } sigArg = info.compCompHnd->getArgNext(sigArg); } // Note if the callee's return type is a value type if (info.compMethodInfo->args.retType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_RETURNS_STRUCT); } // Note if the callee's class is a promotable struct if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0) { assert(structPromotionHelper != nullptr); if (structPromotionHelper->CanPromoteStructType(info.compClassHnd)) { inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE); } inlineResult->Note(InlineObservation::CALLEE_CLASS_VALUETYPE); } #ifdef FEATURE_SIMD // Note if this method is has SIMD args or return value if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn) { inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD); } #endif // FEATURE_SIMD // Roughly classify callsite frequency. InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED; // If this is a prejit root, or a maximally hot block... if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->isMaxBBWeight())) { frequency = InlineCallsiteFrequency::HOT; } // No training data. Look for loop-like things. // We consider a recursive call loop-like. Do not give the inlining boost to the method itself. // However, give it to things nearby. else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) && (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle)) { frequency = InlineCallsiteFrequency::LOOP; } else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT)) { frequency = InlineCallsiteFrequency::WARM; } // Now modify the multiplier based on where we're called from. else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { frequency = InlineCallsiteFrequency::RARE; } else { frequency = InlineCallsiteFrequency::BORING; } // Also capture the block weight of the call site. // // In the prejit root case, assume at runtime there might be a hot call site // for this method, so we won't prematurely conclude this method should never // be inlined. 
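    // (The frequency classification above feeds CALLSITE_FREQUENCY below; the prejit-root
    // path simply substitutes a large synthetic weight, prejitHotCallerWeight, so that
    // downstream heuristics treat the hypothetical call site as hot rather than unknown.)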
// weight_t weight = 0; if (pInlineInfo != nullptr) { weight = pInlineInfo->iciBlock->bbWeight; } else { const weight_t prejitHotCallerWeight = 1000000.0; weight = prejitHotCallerWeight; } inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency)); inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)(weight)); bool hasProfile = false; double profileFreq = 0.0; // If the call site has profile data, report the relative frequency of the site. // if ((pInlineInfo != nullptr) && rootCompiler->fgHaveSufficientProfileData()) { const weight_t callSiteWeight = pInlineInfo->iciBlock->bbWeight; const weight_t entryWeight = rootCompiler->fgFirstBB->bbWeight; profileFreq = fgProfileWeightsEqual(entryWeight, 0.0) ? 0.0 : callSiteWeight / entryWeight; hasProfile = true; assert(callSiteWeight >= 0); assert(entryWeight >= 0); } else if (pInlineInfo == nullptr) { // Simulate a hot callsite for PrejitRoot mode. hasProfile = true; profileFreq = 1.0; } inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, hasProfile); inlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, profileFreq); } /***************************************************************************** This method makes STATIC inlining decision based on the IL code. It should not make any inlining decision based on the context. If forceInline is true, then the inlining decision should not depend on performance heuristics (code size, etc.). */ void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult) { unsigned codeSize = methInfo->ILCodeSize; // We shouldn't have made up our minds yet... assert(!inlineResult->IsDecided()); if (methInfo->EHcount) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH); return; } if ((methInfo->ILCode == nullptr) || (codeSize == 0)) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // For now we don't inline varargs (import code can't handle it) if (methInfo->args.isVarArg()) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return; } // Reject if it has too many locals. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs); if (methInfo->locals.numArgs > MAX_INL_LCLS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS); return; } // Make sure there aren't too many arguments. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. 
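    // (Like the locals check above, exceeding MAX_INL_ARGS is reported via NoteFatal: the
    // per-argument tables in the inline info are fixed-size arrays, so there is no fallback.)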
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs); if (methInfo->args.numArgs > MAX_INL_ARGS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS); return; } // Note force inline state inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline); // Note IL code size inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize); if (inlineResult->IsFailure()) { return; } // Make sure maxstack is not too big inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack); if (inlineResult->IsFailure()) { return; } } /***************************************************************************** */ void Compiler::impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult) { // Either EE or JIT might throw exceptions below. // If that happens, just don't inline the method. struct Param { Compiler* pThis; GenTreeCall* call; CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; CORINFO_CONTEXT_HANDLE exactContextHnd; InlineResult* result; InlineCandidateInfo** ppInlineCandidateInfo; } param; memset(&param, 0, sizeof(param)); param.pThis = this; param.call = call; param.fncHandle = fncHandle; param.methAttr = methAttr; param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle); param.result = inlineResult; param.ppInlineCandidateInfo = ppInlineCandidateInfo; bool success = eeRunWithErrorTrap<Param>( [](Param* pParam) { CorInfoInitClassResult initClassResult; #ifdef DEBUG const char* methodName; const char* className; methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className); if (JitConfig.JitNoInline()) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE); goto _exit; } #endif /* Try to get the code address/size for the method */ CORINFO_METHOD_INFO methInfo; if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo)) { pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO); goto _exit; } // Profile data allows us to avoid early "too many IL bytes" outs. pParam->result->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, pParam->pThis->fgHaveSufficientProfileData()); bool forceInline; forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE); pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result); if (pParam->result->IsFailure()) { assert(pParam->result->IsNever()); goto _exit; } // Speculatively check if initClass() can be done. // If it can be done, we will try to inline the method. initClassResult = pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */, pParam->exactContextHnd /* context */); if (initClassResult & CORINFO_INITCLASS_DONT_INLINE) { pParam->result->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT); goto _exit; } // Given the EE the final say in whether to inline or not. 
// This should be last since for verifiable code, this can be expensive /* VM Inline check also ensures that the method is verifiable if needed */ CorInfoInline vmResult; vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle); if (vmResult == INLINE_FAIL) { pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE); } else if (vmResult == INLINE_NEVER) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE); } if (pParam->result->IsFailure()) { // Make sure not to report this one. It was already reported by the VM. pParam->result->SetReported(); goto _exit; } /* Get the method properties */ CORINFO_CLASS_HANDLE clsHandle; clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle); unsigned clsAttr; clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle); /* Get the return type */ var_types fncRetType; fncRetType = pParam->call->TypeGet(); #ifdef DEBUG var_types fncRealRetType; fncRealRetType = JITtype2varType(methInfo.args.retType); assert((genActualType(fncRealRetType) == genActualType(fncRetType)) || // <BUGNUM> VSW 288602 </BUGNUM> // In case of IJW, we allow to assign a native pointer to a BYREF. (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) || (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))); #endif // Allocate an InlineCandidateInfo structure, // // Or, reuse the existing GuardedDevirtualizationCandidateInfo, // which was pre-allocated to have extra room. // InlineCandidateInfo* pInfo; if (pParam->call->IsGuardedDevirtualizationCandidate()) { pInfo = pParam->call->gtInlineCandidateInfo; } else { pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo; // Null out bits we don't use when we're just inlining pInfo->guardedClassHandle = nullptr; pInfo->guardedMethodHandle = nullptr; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->likelihood = 0; pInfo->requiresInstMethodTableArg = false; } pInfo->methInfo = methInfo; pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd; pInfo->clsHandle = clsHandle; pInfo->exactContextHnd = pParam->exactContextHnd; pInfo->retExpr = nullptr; pInfo->preexistingSpillTemp = BAD_VAR_NUM; pInfo->clsAttr = clsAttr; pInfo->methAttr = pParam->methAttr; pInfo->initClassResult = initClassResult; pInfo->fncRetType = fncRetType; pInfo->exactContextNeedsRuntimeLookup = false; pInfo->inlinersContext = pParam->pThis->compInlineContext; // Note exactContextNeedsRuntimeLookup is reset later on, // over in impMarkInlineCandidate. *(pParam->ppInlineCandidateInfo) = pInfo; _exit:; }, &param); if (!success) { param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); } } //------------------------------------------------------------------------ // impInlineRecordArgInfo: record information about an inline candidate argument // // Arguments: // pInlineInfo - inline info for the inline candidate // curArgVal - tree for the caller actual argument value // argNum - logical index of this argument // inlineResult - result of ongoing inline evaluation // // Notes: // // Checks for various inline blocking conditions and makes notes in // the inline info arg table about the properties of the actual. These // properties are used later by impInlineFetchArg to determine how best to // pass the argument into the inlinee. 
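//    For example (illustrative only), at a call site like Callee(localVar, 42, SomeCall())
//    the actuals would typically be noted as argIsLclVar, argIsInvariant and argHasSideEff
//    respectively, which later drives whether impInlineFetchArg substitutes the caller's
//    tree directly or spills it to a temp.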
void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult) { InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum]; inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUT_ARG and RET_EXPR. curArgVal = curArgVal->gtSkipPutArgType(); curArgVal = curArgVal->gtRetExprVal(); if (curArgVal->gtOper == GT_MKREFANY) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY); return; } GenTree* lclVarTree; const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree); if (isAddressInLocal && varTypeIsStruct(lclVarTree)) { inlCurArgInfo->argIsByRefToStructLocal = true; #ifdef FEATURE_SIMD if (lvaTable[lclVarTree->AsLclVarCommon()->GetLclNum()].lvSIMDType) { pInlineInfo->hasSIMDTypeArgLocalOrReturn = true; } #endif // FEATURE_SIMD } if (curArgVal->gtFlags & GTF_ALL_EFFECT) { inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0; inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0; } if (curArgVal->gtOper == GT_LCL_VAR) { inlCurArgInfo->argIsLclVar = true; /* Remember the "original" argument number */ INDEBUG(curArgVal->AsLclVar()->gtLclILoffs = argNum;) } if (curArgVal->IsInvariant()) { inlCurArgInfo->argIsInvariant = true; if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0)) { // Abort inlining at this call site inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS); return; } } bool isExact = false; bool isNonNull = false; inlCurArgInfo->argIsExact = (gtGetClassHandle(curArgVal, &isExact, &isNonNull) != NO_CLASS_HANDLE) && isExact; // If the arg is a local that is address-taken, we can't safely // directly substitute it into the inlinee. // // Previously we'd accomplish this by setting "argHasLdargaOp" but // that has a stronger meaning: that the arg value can change in // the method body. Using that flag prevents type propagation, // which is safe in this case. // // Instead mark the arg as having a caller local ref. if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal)) { inlCurArgInfo->argHasCallerLocalRef = true; } #ifdef DEBUG if (verbose) { if (inlCurArgInfo->argIsThis) { printf("thisArg:"); } else { printf("\nArgument #%u:", argNum); } if (inlCurArgInfo->argIsLclVar) { printf(" is a local var"); } if (inlCurArgInfo->argIsInvariant) { printf(" is a constant"); } if (inlCurArgInfo->argHasGlobRef) { printf(" has global refs"); } if (inlCurArgInfo->argHasCallerLocalRef) { printf(" has caller local ref"); } if (inlCurArgInfo->argHasSideEff) { printf(" has side effects"); } if (inlCurArgInfo->argHasLdargaOp) { printf(" has ldarga effect"); } if (inlCurArgInfo->argHasStargOp) { printf(" has starg effect"); } if (inlCurArgInfo->argIsByRefToStructLocal) { printf(" is byref to a struct local"); } printf("\n"); gtDispTree(curArgVal); printf("\n"); } #endif } //------------------------------------------------------------------------ // impInlineInitVars: setup inline information for inlinee args and locals // // Arguments: // pInlineInfo - inline info for the inline candidate // // Notes: // This method primarily adds caller-supplied info to the inlArgInfo // and sets up the lclVarInfo table. // // For args, the inlArgInfo records properties of the actual argument // including the tree node that produces the arg value. 
This node is // usually the tree node present at the call, but may also differ in // various ways: // - when the call arg is a GT_RET_EXPR, we search back through the ret // expr chain for the actual node. Note this will either be the original // call (which will be a failed inline by this point), or the return // expression from some set of inlines. // - when argument type casting is needed the necessary casts are added // around the argument node. // - if an argument can be simplified by folding then the node here is the // folded value. // // The method may make observations that lead to marking this candidate as // a failed inline. If this happens the initialization is abandoned immediately // to try and reduce the jit time cost for a failed inline. void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) { assert(!compIsForInlining()); GenTreeCall* call = pInlineInfo->iciCall; CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo; unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr; InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo; InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo; InlineResult* inlineResult = pInlineInfo->inlineResult; // Inlined methods always use the managed calling convention const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo, CorInfoCallConvExtension::Managed); /* init the argument stuct */ memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0])); GenTreeCall::Use* thisArg = call->gtCallThisArg; unsigned argCnt = 0; // Count of the arguments assert((methInfo->args.hasThis()) == (thisArg != nullptr)); if (thisArg != nullptr) { inlArgInfo[0].argIsThis = true; impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Record some information about each of the arguments */ bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0; #if USER_ARGS_COME_LAST unsigned typeCtxtArg = (thisArg != nullptr) ? 1 : 0; #else // USER_ARGS_COME_LAST unsigned typeCtxtArg = methInfo->args.totalILArgs(); #endif // USER_ARGS_COME_LAST for (GenTreeCall::Use& use : call->Args()) { if (hasRetBuffArg && (&use == call->gtCallArgs)) { continue; } // Ignore the type context argument if (hasTypeCtxtArg && (argCnt == typeCtxtArg)) { pInlineInfo->typeContextArg = typeCtxtArg; typeCtxtArg = 0xFFFFFFFF; continue; } GenTree* actualArg = gtFoldExpr(use.GetNode()); impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Make sure we got the arg number right */ assert(argCnt == methInfo->args.totalILArgs()); #ifdef FEATURE_SIMD bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn; #endif // FEATURE_SIMD /* We have typeless opcodes, get type information from the signature */ if (thisArg != nullptr) { lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle); lclVarInfo[0].lclHasLdlocaOp = false; #ifdef FEATURE_SIMD // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase // the inlining multiplier) for anything in that assembly. // But we only need to normalize it if it is a TYP_STRUCT // (which we need to do even if we have already set foundSIMDType). 
if (!foundSIMDType && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo))) { foundSIMDType = true; } #endif // FEATURE_SIMD var_types sigType = ((clsAttr & CORINFO_FLG_VALUECLASS) != 0) ? TYP_BYREF : TYP_REF; lclVarInfo[0].lclTypeInfo = sigType; GenTree* thisArgNode = thisArg->GetNode(); assert(varTypeIsGC(thisArgNode->TypeGet()) || // "this" is managed ((thisArgNode->TypeGet() == TYP_I_IMPL) && // "this" is unmgd but the method's class doesnt care (clsAttr & CORINFO_FLG_VALUECLASS))); if (genActualType(thisArgNode->TypeGet()) != genActualType(sigType)) { if (sigType == TYP_REF) { /* The argument cannot be bashed into a ref (see bug 750871) */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF); return; } /* This can only happen with byrefs <-> ints/shorts */ assert(sigType == TYP_BYREF); assert((genActualType(thisArgNode->TypeGet()) == TYP_I_IMPL) || (thisArgNode->TypeGet() == TYP_BYREF)); lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } } /* Init the types of the arguments and make sure the types * from the trees match the types in the signature */ CORINFO_ARG_LIST_HANDLE argLst; argLst = methInfo->args.args; unsigned i; for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst)) { var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args); lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst); #ifdef FEATURE_SIMD if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo))) { // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've // found a SIMD type, even if this may not be a type we recognize (the assumption is that // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier). foundSIMDType = true; if (sigType == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle()); sigType = structType; } } #endif // FEATURE_SIMD lclVarInfo[i].lclTypeInfo = sigType; lclVarInfo[i].lclHasLdlocaOp = false; /* Does the tree type match the signature type? */ GenTree* inlArgNode = inlArgInfo[i].argNode; if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE)) { assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType)); assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType)); /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints, but in bad IL cases with caller-callee signature mismatches we can see other types. Intentionally reject cases with mismatches so the jit is more flexible when encountering bad IL. */ bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) || (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) || (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType)); if (!isPlausibleTypeMatch) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE); return; } GenTree** pInlArgNode; if (inlArgNode->OperIs(GT_PUTARG_TYPE)) { // There was a widening or narrowing cast. GenTreeUnOp* putArgType = inlArgNode->AsUnOp(); pInlArgNode = &putArgType->gtOp1; inlArgNode = putArgType->gtOp1; } else { // The same size but different type of the arguments. pInlArgNode = &inlArgInfo[i].argNode; } /* Is it a narrowing or widening cast? 
* Widening casts are ok since the value computed is already * normalized to an int (on the IL stack) */ if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType)) { if (sigType == TYP_BYREF) { lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else if (inlArgNode->gtType == TYP_BYREF) { assert(varTypeIsIntOrI(sigType)); /* If possible bash the BYREF to an int */ if (inlArgNode->IsLocalAddrExpr() != nullptr) { inlArgNode->gtType = TYP_I_IMPL; lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else { /* Arguments 'int <- byref' cannot be changed */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT); return; } } else if (genTypeSize(sigType) < TARGET_POINTER_SIZE) { // Narrowing cast. if (inlArgNode->OperIs(GT_LCL_VAR)) { const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum(); if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum)) { // We don't need to insert a cast here as the variable // was assigned a normalized value of the right type. continue; } } inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; // Try to fold the node in case we have constant arguments. if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #ifdef TARGET_64BIT else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType)) { // This should only happen for int -> native int widening inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; /* Try to fold the node in case we have constant arguments */ if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #endif // TARGET_64BIT } } } /* Init the types of the local variables */ CORINFO_ARG_LIST_HANDLE localsSig; localsSig = methInfo->locals.args; for (i = 0; i < methInfo->locals.numArgs; i++) { bool isPinned; var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned); lclVarInfo[i + argCnt].lclHasLdlocaOp = false; lclVarInfo[i + argCnt].lclTypeInfo = type; if (varTypeIsGC(type)) { if (isPinned) { JITDUMP("Inlinee local #%02u is pinned\n", i); lclVarInfo[i + argCnt].lclIsPinned = true; // Pinned locals may cause inlines to fail. inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS); if (inlineResult->IsFailure()) { return; } } pInlineInfo->numberOfGcRefLocals++; } else if (isPinned) { JITDUMP("Ignoring pin on inlinee local #%02u -- not a GC type\n", i); } lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig); // If this local is a struct type with GC fields, inform the inliner. It may choose to bail // out on the inline. if (type == TYP_STRUCT) { CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle(); DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (inlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; some policies do // not track the relative hotness of call sites for "always" inline cases. 
if (pInlineInfo->iciBlock->isRunRarely()) { inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (inlineResult->IsFailure()) { return; } } } } localsSig = info.compCompHnd->getArgNext(localsSig); #ifdef FEATURE_SIMD if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo))) { foundSIMDType = true; if (supportSIMDTypes() && type == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle()); lclVarInfo[i + argCnt].lclTypeInfo = structType; } } #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd)) { foundSIMDType = true; } pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType; #endif // FEATURE_SIMD } //------------------------------------------------------------------------ // impInlineFetchLocal: get a local var that represents an inlinee local // // Arguments: // lclNum -- number of the inlinee local // reason -- debug string describing purpose of the local var // // Returns: // Number of the local to use // // Notes: // This method is invoked only for locals actually used in the // inlinee body. // // Allocates a new temp if necessary, and copies key properties // over from the inlinee local var info. unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)) { assert(compIsForInlining()); unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum]; if (tmpNum == BAD_VAR_NUM) { const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt]; const var_types lclTyp = inlineeLocal.lclTypeInfo; // The lifetime of this local might span multiple BBs. // So it is a long lifetime local. impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason)); // Copy over key info lvaTable[tmpNum].lvType = lclTyp; lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp; lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned; lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp; lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp; // Copy over class handle for ref types. Note this may be a // shared type -- someday perhaps we can get the exact // signature and pass in a more precise type. if (lclTyp == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp; if (lvaTable[tmpNum].lvSingleDef) { JITDUMP("Marked V%02u as a single def temp\n", tmpNum); } lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef()); } if (inlineeLocal.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); } else { // This is a wrapped primitive. Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo; } } #ifdef DEBUG // Sanity check that we're properly prepared for gc ref locals. if (varTypeIsGC(lclTyp)) { // Since there are gc locals we should have seen them earlier // and if there was a return value, set up the spill temp. assert(impInlineInfo->HasGcRefLocals()); assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp()); } else { // Make sure all pinned locals count as gc refs. 
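            // (lclIsPinned is only ever set for GC-typed locals over in impInlineInitVars;
            // a pin on a non-GC inlinee local is ignored there, so it cannot show up here.)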
assert(!inlineeLocal.lclIsPinned); } #endif // DEBUG } return tmpNum; } //------------------------------------------------------------------------ // impInlineFetchArg: return tree node for argument value in an inlinee // // Arguments: // lclNum -- argument number in inlinee IL // inlArgInfo -- argument info for inlinee // lclVarInfo -- var info for inlinee // // Returns: // Tree for the argument's value. Often an inlinee-scoped temp // GT_LCL_VAR but can be other tree kinds, if the argument // expression from the caller can be directly substituted into the // inlinee body. // // Notes: // Must be used only for arguments -- use impInlineFetchLocal for // inlinee locals. // // Direct substitution is performed when the formal argument cannot // change value in the inlinee body (no starg or ldarga), and the // actual argument expression's value cannot be changed if it is // substituted it into the inlinee body. // // Even if an inlinee-scoped temp is returned here, it may later be // "bashed" to a caller-supplied tree when arguments are actually // passed (see fgInlinePrependStatements). Bashing can happen if // the argument ends up being single use and other conditions are // met. So the contents of the tree returned here may not end up // being the ones ultimately used for the argument. // // This method will side effect inlArgInfo. It should only be called // for actual uses of the argument in the inlinee. GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo) { // Cache the relevant arg and lcl info for this argument. // We will modify argInfo but not lclVarInfo. InlArgInfo& argInfo = inlArgInfo[lclNum]; const InlLclVarInfo& lclInfo = lclVarInfo[lclNum]; const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp; const var_types lclTyp = lclInfo.lclTypeInfo; GenTree* op1 = nullptr; GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal(); if (argInfo.argIsInvariant && !argCanBeModified) { // Directly substitute constants or addresses of locals // // Clone the constant. Note that we cannot directly use // argNode in the trees even if !argInfo.argIsUsed as this // would introduce aliasing between inlArgInfo[].argNode and // impInlineExpr. Then gtFoldExpr() could change it, causing // further references to the argument working off of the // bashed copy. op1 = gtCloneExpr(argNode); PREFIX_ASSUME(op1 != nullptr); argInfo.argTmpNum = BAD_VAR_NUM; // We may need to retype to ensure we match the callee's view of the type. // Otherwise callee-pass throughs of arguments can create return type // mismatches that block inlining. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. if (op1->TypeGet() != lclTyp) { op1->gtType = genActualType(lclTyp); } } else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef) { // Directly substitute unaliased caller locals for args that cannot be modified // // Use the caller-supplied node if this is the first use. op1 = argNode; unsigned argLclNum = op1->AsLclVarCommon()->GetLclNum(); argInfo.argTmpNum = argLclNum; // Use an equivalent copy if this is the second or subsequent // use. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. If inlining is not prevented // but a cast is necessary, we similarly expect it to have been inserted then. // So here we may have argument type mismatches that are benign, for instance // passing a TYP_SHORT local (eg. 
normalized-on-load) as a TYP_INT arg. // The exception is when the inlining means we should start tracking the argument. if (argInfo.argIsUsed || ((lclTyp == TYP_BYREF) && (op1->TypeGet() != TYP_BYREF))) { assert(op1->gtOper == GT_LCL_VAR); assert(lclNum == op1->AsLclVar()->gtLclILoffs); // Create a new lcl var node - remember the argument lclNum op1 = impCreateLocalNode(argLclNum DEBUGARG(op1->AsLclVar()->gtLclILoffs)); // Start tracking things as a byref if the parameter is a byref. if (lclTyp == TYP_BYREF) { op1->gtType = TYP_BYREF; } } } else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp) { /* Argument is a by-ref address to a struct, a normed struct, or its field. In these cases, don't spill the byref to a local, simply clone the tree and use it. This way we will increase the chance for this byref to be optimized away by a subsequent "dereference" operation. From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal. For example, if the caller is: ldloca.s V_1 // V_1 is a local struct call void Test.ILPart::RunLdargaOnPointerArg(int32*) and the callee being inlined has: .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed ldarga.s ptrToInts call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**) then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR. */ assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL); op1 = gtCloneExpr(argNode); } else { /* Argument is a complex expression - it must be evaluated into a temp */ if (argInfo.argHasTmp) { assert(argInfo.argIsUsed); assert(argInfo.argTmpNum < lvaCount); /* Create a new lcl var node - remember the argument lclNum */ op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp)); /* This is the second or later use of the this argument, so we have to use the temp (instead of the actual arg) */ argInfo.argBashTmpNode = nullptr; } else { /* First time use */ assert(!argInfo.argIsUsed); /* Reserve a temp for the expression. * Use a large size node as we may change it later */ const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg")); lvaTable[tmpNum].lvType = lclTyp; // For ref types, determine the type of the temp. if (lclTyp == TYP_REF) { if (!argCanBeModified) { // If the arg can't be modified in the method // body, use the type of the value, if // known. Otherwise, use the declared type. assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmpNum); lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } else { // Arg might be modified, use the declared type of // the argument. lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } } assert(!lvaTable[tmpNum].IsAddressExposed()); if (argInfo.argHasLdargaOp) { lvaTable[tmpNum].lvHasLdAddrOp = 1; } if (lclInfo.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(tmpNum); } } else { // This is a wrapped primitive. 
Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo; } } argInfo.argHasTmp = true; argInfo.argTmpNum = tmpNum; // If we require strict exception order, then arguments must // be evaluated in sequence before the body of the inlined method. // So we need to evaluate them to a temp. // Also, if arguments have global or local references, we need to // evaluate them to a temp before the inlined body as the // inlined body may be modifying the global ref. // TODO-1stClassStructs: We currently do not reuse an existing lclVar // if it is a struct, because it requires some additional handling. if ((!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef && !argInfo.argHasCallerLocalRef)) { /* Get a *LARGE* LCL_VAR node */ op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp) DEBUGARG(lclNum)); /* Record op1 as the very first use of this argument. If there are no further uses of the arg, we may be able to use the actual arg node instead of the temp. If we do see any further uses, we will clear this. */ argInfo.argBashTmpNode = op1; } else { /* Get a small LCL_VAR node */ op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp)); /* No bashing of this argument */ argInfo.argBashTmpNode = nullptr; } } } // Mark this argument as used. argInfo.argIsUsed = true; return op1; } /****************************************************************************** Is this the original "this" argument to the call being inlined? Note that we do not inline methods with "starg 0", and so we do not need to worry about it. */ bool Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); return (tree->gtOper == GT_LCL_VAR && tree->AsLclVarCommon()->GetLclNum() == inlArgInfo[0].argTmpNum); } //----------------------------------------------------------------------------- // impInlineIsGuaranteedThisDerefBeforeAnySideEffects: Check if a dereference in // the inlinee can guarantee that the "this" pointer is non-NULL. // // Arguments: // additionalTree - a tree to check for side effects // additionalCallArgs - a list of call args to check for side effects // dereferencedAddress - address expression being dereferenced // inlArgInfo - inlinee argument information // // Notes: // If we haven't hit a branch or a side effect, and we are dereferencing // from 'this' to access a field or make GTF_CALL_NULLCHECK call, // then we can avoid a separate null pointer check. // // The importer stack and current statement list are searched for side effects. // Trees that have been popped of the stack but haven't been appended to the // statement list and have to be checked for side effects may be provided via // additionalTree and additionalCallArgs. 
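//    For example (illustrative only), when inlining a trivial instance getter whose body is
//        ldarg.0
//        ldfld int32 Foo::_value
//        ret
//    the field load itself faults on a null 'this', so if nothing with side effects has
//    executed earlier in the first block, the separate explicit null check on 'this' that
//    would otherwise guard the inlined body can be skipped.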
// bool Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); assert(opts.OptEnabled(CLFLG_INLINING)); BasicBlock* block = compCurBB; if (block != fgFirstBB) { return false; } if (!impInlineIsThis(dereferencedAddress, inlArgInfo)) { return false; } if ((additionalTree != nullptr) && GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTree->gtFlags)) { return false; } for (GenTreeCall::Use& use : GenTreeCall::UseList(additionalCallArgs)) { if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(use.GetNode()->gtFlags)) { return false; } } for (Statement* stmt : StatementList(impStmtList)) { GenTree* expr = stmt->GetRootNode(); if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags)) { return false; } } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTreeFlags stackTreeFlags = verCurrentState.esStack[level].val->gtFlags; if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags)) { return false; } } return true; } //------------------------------------------------------------------------ // impMarkInlineCandidate: determine if this call can be subsequently inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // Mostly a wrapper for impMarkInlineCandidateHelper that also undoes // guarded devirtualization for virtual calls where the method we'd // devirtualize to cannot be inlined. void Compiler::impMarkInlineCandidate(GenTree* callNode, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { GenTreeCall* call = callNode->AsCall(); // Do the actual evaluation impMarkInlineCandidateHelper(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); // If this call is an inline candidate or is not a guarded devirtualization // candidate, we're done. if (call->IsInlineCandidate() || !call->IsGuardedDevirtualizationCandidate()) { return; } // If we can't inline the call we'd guardedly devirtualize to, // we undo the guarded devirtualization, as the benefit from // just guarded devirtualization alone is likely not worth the // extra jit time and code size. // // TODO: it is possibly interesting to allow this, but requires // fixes elsewhere too... JITDUMP("Revoking guarded devirtualization candidacy for call [%06u]: target method can't be inlined\n", dspTreeID(call)); call->ClearGuardedDevirtualizationCandidate(); } //------------------------------------------------------------------------ // impMarkInlineCandidateHelper: determine if this call can be subsequently // inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // If callNode is an inline candidate, this method sets the flag // GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have // filled in the associated InlineCandidateInfo. // // If callNode is not an inline candidate, and the reason is // something that is inherent to the method being called, the // method may be marked as "noinline" to short-circuit any // future assessments of calls to this method. 
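//    Broadly, the helper below first rejects sites that can never be inlined (debug codegen,
//    explicit tail prefixes, delegate Invoke, helper and indirect calls, synchronized or
//    noinline callees, non-inlineable PInvoke sites), and only then calls impCheckCanInline,
//    which consults the EE and fills in the InlineCandidateInfo attached to the call.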
void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { // Let the strategy know there's another call impInlineRoot()->m_inlineStrategy->NoteCall(); if (!opts.OptEnabled(CLFLG_INLINING)) { /* XXX Mon 8/18/2008 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and * figure out why we did not set MAXOPT for this compile. */ assert(!compIsForInlining()); return; } if (compIsForImportOnly()) { // Don't bother creating the inline candidate during verification. // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification // that leads to the creation of multiple instances of Compiler. return; } InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate"); // Don't inline if not optimizing root method if (opts.compDbgCode) { inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN); return; } // Don't inline if inlining into this method is disabled. if (impInlineRoot()->m_inlineStrategy->IsInliningDisabled()) { inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE); return; } // Don't inline into callers that use the NextCallReturnAddress intrinsic. if (info.compHasNextCallRetAddr) { inlineResult.NoteFatal(InlineObservation::CALLER_USES_NEXT_CALL_RET_ADDR); return; } // Inlining candidate determination needs to honor only IL tail prefix. // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive). if (call->IsTailPrefixedCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX); return; } // Delegate Invoke method doesn't have a body and gets special cased instead. // Don't even bother trying to inline it. if (call->IsDelegateInvoke()) { inlineResult.NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // Tail recursion elimination takes precedence over inlining. // TODO: We may want to do some of the additional checks from fgMorphCall // here to reduce the chance we don't inline a call that won't be optimized // as a fast tail call or turned into a loop. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } if (call->IsVirtual()) { // Allow guarded devirt calls to be treated as inline candidates, // but reject all other virtual calls. if (!call->IsGuardedDevirtualizationCandidate()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT); return; } } /* Ignore helper calls */ if (call->gtCallType == CT_HELPER) { assert(!call->IsGuardedDevirtualizationCandidate()); inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER); return; } /* Ignore indirect calls */ if (call->gtCallType == CT_INDIRECT) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED); return; } /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding * inlining in throw blocks. I should consider the same thing for catch and filter regions. 
*/ CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; if (call->IsGuardedDevirtualizationCandidate()) { if (call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle != nullptr) { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle; } else { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodHandle; } methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } else { fncHandle = call->gtCallMethHnd; // Reuse method flags from the original callInfo if possible if (fncHandle == callInfo->hMethod) { methAttr = callInfo->methodFlags; } else { methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } } #ifdef DEBUG if (compStressCompile(STRESS_FORCE_INLINE, 0)) { methAttr |= CORINFO_FLG_FORCEINLINE; } #endif // Check for COMPlus_AggressiveInlining if (compDoAggressiveInlining) { methAttr |= CORINFO_FLG_FORCEINLINE; } if (!(methAttr & CORINFO_FLG_FORCEINLINE)) { /* Don't bother inline blocks that are in the filter region */ if (bbInCatchHandlerILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the catch handler region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH); return; } if (bbInFilterILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the filter region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER); return; } } /* Check if we tried to inline this method before */ if (methAttr & CORINFO_FLG_DONT_INLINE) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE); return; } /* Cannot inline synchronized methods */ if (methAttr & CORINFO_FLG_SYNCH) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED); return; } /* Check legality of PInvoke callsite (for inlining of marshalling code) */ if (methAttr & CORINFO_FLG_PINVOKE) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (!impCanPInvokeInlineCallSite(block)) { inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH); return; } } InlineCandidateInfo* inlineCandidateInfo = nullptr; impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult); if (inlineResult.IsFailure()) { return; } // The old value should be null OR this call should be a guarded devirtualization candidate. assert((call->gtInlineCandidateInfo == nullptr) || call->IsGuardedDevirtualizationCandidate()); // The new value should not be null. assert(inlineCandidateInfo != nullptr); inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup; call->gtInlineCandidateInfo = inlineCandidateInfo; // If we're in an inlinee compiler, and have a return spill temp, and this inline candidate // is also a tail call candidate, it can use the same return spill temp. // if (compIsForInlining() && call->CanTailCall() && (impInlineInfo->inlineCandidateInfo->preexistingSpillTemp != BAD_VAR_NUM)) { inlineCandidateInfo->preexistingSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp; JITDUMP("Inline candidate [%06u] can share spill temp V%02u\n", dspTreeID(call), inlineCandidateInfo->preexistingSpillTemp); } // Mark the call node as inline candidate. call->gtFlags |= GTF_CALL_INLINE_CANDIDATE; // Let the strategy know there's another candidate. 
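    // (At this point the call has survived the callsite and callee screens above and
    // impCheckCanInline has produced an InlineCandidateInfo; the actual inline-or-not
    // decision is still deferred to the inlining phase proper (fgInline).)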
impInlineRoot()->m_inlineStrategy->NoteCandidate(); // Since we're not actually inlining yet, and this call site is // still just an inline candidate, there's nothing to report. inlineResult.SetReported(); } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by target-specific // instructions bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName) { #if defined(TARGET_XARCH) switch (intrinsicName) { // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1 // instructions to directly compute round/ceiling/floor/truncate. case NI_System_Math_Abs: case NI_System_Math_Sqrt: return true; case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: return compOpportunisticallyDependsOn(InstructionSet_SSE41); case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_FMA); default: return false; } #elif defined(TARGET_ARM64) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: case NI_System_Math_Sqrt: case NI_System_Math_Max: case NI_System_Math_Min: return true; case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_AdvSimd); default: return false; } #elif defined(TARGET_ARM) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Round: case NI_System_Math_Sqrt: return true; default: return false; } #else // TODO: This portion of logic is not implemented for other arch. // The reason for returning true is that on all other arch the only intrinsic // enabled are target intrinsics. return true; #endif } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by calling System.Math // methods. bool Compiler::IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName) { // Currently, if a math intrinsic is not implemented by target-specific // instructions, it will be implemented by a System.Math call. In the // future, if we turn to implementing some of them with helper calls, // this predicate needs to be revisited. 
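    // (For example, on x64 without SSE4.1, NI_System_Math_Floor is not a target intrinsic
    // per IsTargetIntrinsic above, so this returns true and the call remains a regular
    // call to the System.Math method.)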
    return !IsTargetIntrinsic(intrinsicName);
}

bool Compiler::IsMathIntrinsic(NamedIntrinsic intrinsicName)
{
    switch (intrinsicName)
    {
        case NI_System_Math_Abs:
        case NI_System_Math_Acos:
        case NI_System_Math_Acosh:
        case NI_System_Math_Asin:
        case NI_System_Math_Asinh:
        case NI_System_Math_Atan:
        case NI_System_Math_Atanh:
        case NI_System_Math_Atan2:
        case NI_System_Math_Cbrt:
        case NI_System_Math_Ceiling:
        case NI_System_Math_Cos:
        case NI_System_Math_Cosh:
        case NI_System_Math_Exp:
        case NI_System_Math_Floor:
        case NI_System_Math_FMod:
        case NI_System_Math_FusedMultiplyAdd:
        case NI_System_Math_ILogB:
        case NI_System_Math_Log:
        case NI_System_Math_Log2:
        case NI_System_Math_Log10:
        case NI_System_Math_Max:
        case NI_System_Math_Min:
        case NI_System_Math_Pow:
        case NI_System_Math_Round:
        case NI_System_Math_Sin:
        case NI_System_Math_Sinh:
        case NI_System_Math_Sqrt:
        case NI_System_Math_Tan:
        case NI_System_Math_Tanh:
        case NI_System_Math_Truncate:
        {
            assert((intrinsicName > NI_SYSTEM_MATH_START) && (intrinsicName < NI_SYSTEM_MATH_END));
            return true;
        }

        default:
        {
            assert((intrinsicName < NI_SYSTEM_MATH_START) || (intrinsicName > NI_SYSTEM_MATH_END));
            return false;
        }
    }
}

bool Compiler::IsMathIntrinsic(GenTree* tree)
{
    return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->AsIntrinsic()->gtIntrinsicName);
}

//------------------------------------------------------------------------
// impDevirtualizeCall: Attempt to change a virtual vtable call into a
//    normal call
//
// Arguments:
//    call -- the call node to examine/modify
//    pResolvedToken -- [IN] the resolved token used to create the call. Used for R2R.
//    method -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
//    methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
//    pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
//    pExactContextHandle -- [OUT] updated context handle iff call devirtualized
//    isLateDevirtualization -- if devirtualization is happening after importation
//    isExplicitTailCall -- [IN] true if we plan on using an explicit tail call
//    ilOffset -- IL offset of the call
//
// Notes:
//    Virtual calls in IL will always "invoke" the base class method.
//
//    This transformation looks for evidence that the type of 'this'
//    in the call is exactly known, is a final class or would invoke
//    a final method, and if that and other safety checks pan out,
//    modifies the call and the call info to create a direct call.
//
//    This transformation is initially done in the importer and not
//    in some subsequent optimization pass because we want it to be
//    upstream of inline candidate identification.
//
//    However, later phases may supply improved type information that
//    can enable further devirtualization. We currently reinvoke this
//    code after inlining, if the return value of the inlined call is
//    the 'this obj' of a subsequent virtual call.
//
//    If devirtualization succeeds and the call's this object is a
//    (boxed) value type, the jit will ask the EE for the unboxed entry
//    point. If this exists, the jit will invoke the unboxed entry
//    on the box payload. In addition, if the boxing operation is
//    visible to the jit and the call is the only consumer of the box,
//    the jit will try to analyze the box to see if the call can instead
//    be made on a local copy. If that is doable, the call is
//    updated to invoke the unboxed entry on the local copy and the
//    boxing operation is removed.
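//    For example (illustrative only), given C# along the lines of
//
//        sealed class Dog : Animal { public override string Speak() => "woof"; }
//        Animal a = new Dog();
//        a.Speak();              // IL: callvirt instance string Animal::Speak()
//
//    the jit can see that the exact type of 'a' is Dog (and that Dog is final), so the
//    callvirt is rewritten as a direct call to Dog::Speak, which can then be considered
//    as an inline candidate.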
// // When guarded devirtualization is enabled, this method will mark // calls as guarded devirtualization candidates, if the type of `this` // is not exactly known, and there is a plausible guess for the type. void Compiler::impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* pContextHandle, CORINFO_CONTEXT_HANDLE* pExactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset) { assert(call != nullptr); assert(method != nullptr); assert(methodFlags != nullptr); assert(pContextHandle != nullptr); // This should be a virtual vtable or virtual stub call. // assert(call->IsVirtual()); // Possibly instrument. Note for OSR+PGO we will instrument when // optimizing and (currently) won't devirtualize. We may want // to revisit -- if we can devirtualize we should be able to // suppress the probe. // // We strip BBINSTR from inlinees currently, so we'll only // do this for the root method calls. // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { assert(opts.OptimizationDisabled() || opts.IsOSR()); assert(!compIsForInlining()); // During importation, optionally flag this block as one that // contains calls requiring class profiling. Ideally perhaps // we'd just keep track of the calls themselves, so we don't // have to search for them later. // if ((call->gtCallType != CT_INDIRECT) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && (JitConfig.JitClassProfiling() > 0) && !isLateDevirtualization) { JITDUMP("\n ... marking [%06u] in " FMT_BB " for class profile instrumentation\n", dspTreeID(call), compCurBB->bbNum); ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; // Record some info needed for the class profiling probe. // pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; // Flag block as needing scrutiny // compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return; } // Bail if optimizations are disabled. if (opts.OptimizationDisabled()) { return; } #if defined(DEBUG) // Bail if devirt is disabled. if (JitConfig.JitEnableDevirtualization() == 0) { return; } // Optionally, print info on devirtualization Compiler* const rootCompiler = impInlineRoot(); const bool doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodName, rootCompiler->info.compClassName, &rootCompiler->info.compMethodInfo->args); #endif // DEBUG // Fetch information about the virtual method we're calling. CORINFO_METHOD_HANDLE baseMethod = *method; unsigned baseMethodAttribs = *methodFlags; if (baseMethodAttribs == 0) { // For late devirt we may not have method attributes, so fetch them. baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); } else { #if defined(DEBUG) // Validate that callInfo has up to date method flags const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); // All the base method attributes should agree, save that // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1 // because of concurrent jitting activity. // // Note we don't look at this particular flag bit below, and // later on (if we do try and inline) we will rediscover why // the method can't be inlined, so there's no danger here in // seeing this particular flag bit in different states between // the cached and fresh values. 
if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE)) { assert(!"mismatched method attributes"); } #endif // DEBUG } // In R2R mode, we might see virtual stub calls to // non-virtuals. For instance cases where the non-virtual method // is in a different assembly but is called via CALLVIRT. For // verison resilience we must allow for the fact that the method // might become virtual in some update. // // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a // regular call+nullcheck upstream, so we won't reach this // point. if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0) { assert(call->IsVirtualStub()); assert(opts.IsReadyToRun()); JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n"); return; } // Fetch information about the class that introduced the virtual method. CORINFO_CLASS_HANDLE baseClass = info.compCompHnd->getMethodClass(baseMethod); const DWORD baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass); // Is the call an interface call? const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0; // See what we know about the type of 'this' in the call. GenTree* thisObj = call->gtCallThisArg->GetNode()->gtEffectiveVal(false); bool isExact = false; bool objIsNonNull = false; CORINFO_CLASS_HANDLE objClass = gtGetClassHandle(thisObj, &isExact, &objIsNonNull); // Bail if we know nothing. if (objClass == NO_CLASS_HANDLE) { JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet())); // Don't try guarded devirtualiztion when we're doing late devirtualization. // if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG("unknown")); return; } // If the objClass is sealed (final), then we may be able to devirtualize. const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass); const bool objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0; #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; const char* objClassNote = "[?]"; const char* objClassName = "?objClass"; const char* baseClassName = "?baseClass"; const char* baseMethodName = "?baseMethod"; if (verbose || doPrint) { objClassNote = isExact ? " [exact]" : objClassIsFinal ? " [final]" : ""; objClassName = info.compCompHnd->getClassName(objClass); baseClassName = info.compCompHnd->getClassName(baseClass); baseMethodName = eeGetMethodName(baseMethod, nullptr); if (verbose) { printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n" " class for 'this' is %s%s (attrib %08x)\n" " base method is %s::%s\n", callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName); } } #endif // defined(DEBUG) // See if the jit's best type for `obj` is an interface. // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal // IL_021d: ldloc.0 // IL_021e: callvirt instance int32 System.Object::GetHashCode() // // If so, we can't devirtualize, but we may be able to do guarded devirtualization. // if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0) { // Don't try guarded devirtualiztion when we're doing late devirtualization. 
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // If we get this far, the jit has a lower bound class type for the `this` object being used for dispatch. // It may or may not know enough to devirtualize... if (isInterface) { assert(call->IsVirtualStub()); JITDUMP("--- base class is interface\n"); } // Fetch the method that would be called based on the declared type of 'this', // and prepare to fetch the method attributes. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = objClass; dvInfo.context = *pContextHandle; dvInfo.detail = CORINFO_DEVIRTUALIZATION_UNKNOWN; dvInfo.pResolvedTokenVirtualMethod = pResolvedToken; info.compCompHnd->resolveVirtualMethod(&dvInfo); CORINFO_METHOD_HANDLE derivedMethod = dvInfo.devirtualizedMethod; CORINFO_CONTEXT_HANDLE exactContext = dvInfo.exactContext; CORINFO_CLASS_HANDLE derivedClass = NO_CLASS_HANDLE; CORINFO_RESOLVED_TOKEN* pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedMethod; if (derivedMethod != nullptr) { assert(exactContext != nullptr); assert(((size_t)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); derivedClass = (CORINFO_CLASS_HANDLE)((size_t)exactContext & ~CORINFO_CONTEXTFLAGS_MASK); } DWORD derivedMethodAttribs = 0; bool derivedMethodIsFinal = false; bool canDevirtualize = false; #if defined(DEBUG) const char* derivedClassName = "?derivedClass"; const char* derivedMethodName = "?derivedMethod"; const char* note = "inexact or not final"; #endif // If we failed to get a method handle, we can't directly devirtualize. // // This can happen when prejitting, if the devirtualization crosses // servicing bubble boundaries, or if objClass is a shared class. // if (derivedMethod == nullptr) { JITDUMP("--- no derived method: %s\n", devirtualizationDetailToString(dvInfo.detail)); } else { // Fetch method attributes to see if method is marked final. derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod); derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0); #if defined(DEBUG) if (isExact) { note = "exact"; } else if (objClassIsFinal) { note = "final class"; } else if (derivedMethodIsFinal) { note = "final method"; } if (verbose || doPrint) { derivedMethodName = eeGetMethodName(derivedMethod, nullptr); derivedClassName = eeGetClassName(derivedClass); if (verbose) { printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note); gtDispTree(call); } } #endif // defined(DEBUG) canDevirtualize = isExact || objClassIsFinal || (!isInterface && derivedMethodIsFinal); } // We still might be able to do a guarded devirtualization. // Note the call might be an interface call or a virtual call. // if (!canDevirtualize) { JITDUMP(" Class not final or exact%s\n", isInterface ? "" : ", and method not final"); #if defined(DEBUG) // If we know the object type exactly, we generally expect we can devirtualize. // (don't when doing late devirt as we won't have an owner type (yet)) // if (!isLateDevirtualization && (isExact || objClassIsFinal) && JitConfig.JitNoteFailedExactDevirtualization()) { printf("@@@ Exact/Final devirt failure in %s at [%06u] $ %s\n", info.compFullName, dspTreeID(call), devirtualizationDetailToString(dvInfo.detail)); } #endif // Don't try guarded devirtualiztion if we're doing late devirtualization. 
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // All checks done. Time to transform the call. // // We should always have an exact class context. // // Note that wouldnt' be true if the runtime side supported array interface devirt, // the resulting method would be a generic method of the non-generic SZArrayHelper class. // assert(canDevirtualize); JITDUMP(" %s; can devirtualize\n", note); // Make the updates. call->gtFlags &= ~GTF_CALL_VIRT_VTABLE; call->gtFlags &= ~GTF_CALL_VIRT_STUB; call->gtCallMethHnd = derivedMethod; call->gtCallType = CT_USER_FUNC; call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED; // Virtual calls include an implicit null check, which we may // now need to make explicit. if (!objIsNonNull) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Clear the inline candidate info (may be non-null since // it's a union field used for other things by virtual // stubs) call->gtInlineCandidateInfo = nullptr; #if defined(DEBUG) if (verbose) { printf("... after devirt...\n"); gtDispTree(call); } if (doPrint) { printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName, baseMethodName, derivedClassName, derivedMethodName, note); } // If we successfully devirtualized based on an exact or final class, // and we have dynamic PGO data describing the likely class, make sure they agree. // // If pgo source is not dynamic we may see likely classes from other versions of this code // where types had different properties. // // If method is an inlinee we may be specializing to a class that wasn't seen at runtime. // const bool canSensiblyCheck = (isExact || objClassIsFinal) && (fgPgoSource == ICorJitInfo::PgoSource::Dynamic) && !compIsForInlining(); if (JitConfig.JitCrossCheckDevirtualizationAndPGO() && canSensiblyCheck) { // We only can handle a single likely class for now const int maxLikelyClasses = 1; LikelyClassRecord likelyClasses[maxLikelyClasses]; UINT32 numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); UINT32 likelihood = likelyClasses[0].likelihood; CORINFO_CLASS_HANDLE likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses > 0) { // PGO had better agree the class we devirtualized to is plausible. // if (likelyClass != derivedClass) { // Managed type system may report different addresses for a class handle // at different times....? // // Also, AOT may have a more nuanced notion of class equality. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { bool mismatch = true; // derivedClass will be the introducer of derived method, so it's possible // likelyClass is a non-overriding subclass. Check up the hierarchy. 
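// For example (illustrative, hypothetical types): if PGO recorded likelyClass
// Derived but exact devirtualization resolved to Base, the class that introduced
// the method and that Derived never overrides, then walking Derived's parent chain
// reaches Base and this is not reported as a mismatch.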
// CORINFO_CLASS_HANDLE parentClass = likelyClass; while (parentClass != NO_CLASS_HANDLE) { if (parentClass == derivedClass) { mismatch = false; break; } parentClass = info.compCompHnd->getParentType(parentClass); } if (mismatch || (numberOfClasses != 1) || (likelihood != 100)) { printf("@@@ Likely %p (%s) != Derived %p (%s) [n=%u, l=%u, il=%u] in %s \n", likelyClass, eeGetClassName(likelyClass), derivedClass, eeGetClassName(derivedClass), numberOfClasses, likelihood, ilOffset, info.compFullName); } assert(!(mismatch || (numberOfClasses != 1) || (likelihood != 100))); } } } } #endif // defined(DEBUG) // If the 'this' object is a value class, see if we can rework the call to invoke the // unboxed entry. This effectively inlines the normally un-inlineable wrapper stub // and exposes the potentially inlinable unboxed entry method. // // We won't optimize explicit tail calls, as ensuring we get the right tail call info // is tricky (we'd need to pass an updated sig and resolved token back to some callers). // // Note we may not have a derived class in some cases (eg interface call on an array) // if (info.compCompHnd->isValueClass(derivedClass)) { if (isExplicitTailCall) { JITDUMP("Have a direct explicit tail call to boxed entry point; can't optimize further\n"); } else { JITDUMP("Have a direct call to boxed entry point. Trying to optimize to call an unboxed entry point\n"); // Note for some shared methods the unboxed entry point requires an extra parameter. bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethod = info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg); if (unboxedEntryMethod != nullptr) { bool optimizedTheBox = false; // If the 'this' object is a local box, see if we can revise things // to not require boxing. // if (thisObj->IsBoxedValue() && !isExplicitTailCall) { // Since the call is the only consumer of the box, we know the box can't escape // since it is being passed an interior pointer. // // So, revise the box to simply create a local copy, use the address of that copy // as the this pointer, and update the entry point to the unboxed entry. // // Ideally, we then inline the boxed method and and if it turns out not to modify // the copy, we can undo the copy too. if (requiresInstMethodTableArg) { // Perform a trial box removal and ask for the type handle tree that fed the box. // JITDUMP("Unboxed entry needs method table arg...\n"); GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE); if (methodTableArg != nullptr) { // If that worked, turn the box into a copy to a local var // JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg)); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { // Pass the local var as this and the type handle as a new arg // JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table " "arg\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. 
// if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } call->gtCallMethHnd = unboxedEntryMethod; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethodAttribs = unboxedMethodAttribs; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n"); } } else { JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n"); } } else { JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n"); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { JITDUMP("Success! invoking unboxed entry point on local copy\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box\n"); } } if (optimizedTheBox) { #if FEATURE_TAILCALL_OPT if (call->IsImplicitTailCall()) { JITDUMP("Clearing the implicit tail call flag\n"); // If set, we clear the implicit tail call flag // as we just introduced a new address taken local variable // call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; } #endif // FEATURE_TAILCALL_OPT } } if (!optimizedTheBox) { // If we get here, we have a boxed value class that either wasn't boxed // locally, or was boxed locally but we were unable to remove the box for // various reasons. // // We can still update the call to invoke the unboxed entry, if the // boxed value is simple. // if (requiresInstMethodTableArg) { // Get the method table from the boxed object. // GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const clonedThisArg = gtClone(thisArg); if (clonedThisArg == nullptr) { JITDUMP( "unboxed entry needs MT arg, but `this` was too complex to clone. Deferring update.\n"); } else { JITDUMP("revising call to invoke unboxed entry with additional method table arg\n"); GenTree* const methodTableArg = gtNewMethodTableLookup(clonedThisArg); // Update the 'this' pointer to refer to the box payload // GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; derivedMethodAttribs = unboxedMethodAttribs; // Add the method table argument. 
// // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. // if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } } } else { JITDUMP("revising call to invoke unboxed entry\n"); GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; } } } else { // Many of the low-level methods on value classes won't have unboxed entries, // as they need access to the type of the object. // // Note this may be a cue for us to stack allocate the boxed object, since // we probably know that these objects don't escape. JITDUMP("Sorry, failed to find unboxed entry point\n"); } } } // Need to update call info too. // *method = derivedMethod; *methodFlags = derivedMethodAttribs; // Update context handle // *pContextHandle = MAKE_METHODCONTEXT(derivedMethod); // Update exact context handle. // if (pExactContextHandle != nullptr) { *pExactContextHandle = MAKE_CLASSCONTEXT(derivedClass); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // For R2R, getCallInfo triggers bookkeeping on the zap // side and acquires the actual symbol to call so we need to call it here. // Look up the new call info. CORINFO_CALL_INFO derivedCallInfo; eeGetCallInfo(pDerivedResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, &derivedCallInfo); // Update the call. call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT; call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT; call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup); } #endif // FEATURE_READYTORUN } //------------------------------------------------------------------------ // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call // to an intrinsic returns an exact type // // Arguments: // methodHnd -- handle for the special intrinsic method // // Returns: // Exact class handle returned by the intrinsic call, if known. // Nullptr if not known, or not likely to lead to beneficial optimization. CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd) { JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd)); CORINFO_CLASS_HANDLE result = nullptr; // See what intrinisc we have... const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd); switch (ni) { case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: { // Expect one class generic parameter; figure out which it is. 
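            // For example (illustrative): for EqualityComparer<string>.get_Default the
            // single class instantiation argument is string; since string is sealed
            // (final) the VM can report the exact comparer class it will return, so
            // later calls on the returned comparer can themselves be devirtualized.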
            CORINFO_SIG_INFO sig;
            info.compCompHnd->getMethodSig(methodHnd, &sig);
            assert(sig.sigInst.classInstCount == 1);
            CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
            assert(typeHnd != nullptr);

            // Lookup can be incorrect when we have __Canon as it won't appear
            // to implement any interface types.
            //
            // And if we do not have a final type, devirt & inlining is
            // unlikely to result in much simplification.
            //
            // We can use CORINFO_FLG_FINAL to screen out both of these cases.
            const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
            const bool isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);

            if (isFinalType)
            {
                if (ni == NI_System_Collections_Generic_EqualityComparer_get_Default)
                {
                    result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
                }
                else
                {
                    assert(ni == NI_System_Collections_Generic_Comparer_get_Default);
                    result = info.compCompHnd->getDefaultComparerClass(typeHnd);
                }
                JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
                        result != nullptr ? eeGetClassName(result) : "unknown");
            }
            else
            {
                JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
            }

            break;
        }

        default:
        {
            JITDUMP("This special intrinsic not handled, sorry...\n");
            break;
        }
    }

    return result;
}

//------------------------------------------------------------------------
// impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it.
//
// Arguments:
// token - init value for the allocated token.
//
// Return Value:
// pointer to token into jit-allocated memory.
CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(const CORINFO_RESOLVED_TOKEN& token)
{
    CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1);
    *memory = token;
    return memory;
}

//------------------------------------------------------------------------
// SpillRetExprHelper: iterate through arguments tree and spill ret_expr to local variables.
//
class SpillRetExprHelper
{
public:
    SpillRetExprHelper(Compiler* comp) : comp(comp)
    {
    }

    void StoreRetExprResultsInArgs(GenTreeCall* call)
    {
        for (GenTreeCall::Use& use : call->Args())
        {
            comp->fgWalkTreePre(&use.NodeRef(), SpillRetExprVisitor, this);
        }
        if (call->gtCallThisArg != nullptr)
        {
            comp->fgWalkTreePre(&call->gtCallThisArg->NodeRef(), SpillRetExprVisitor, this);
        }
    }

private:
    static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
    {
        assert((pTree != nullptr) && (*pTree != nullptr));
        GenTree* tree = *pTree;
        if ((tree->gtFlags & GTF_CALL) == 0)
        {
            // Trees with ret_expr are marked as GTF_CALL.
return Compiler::WALK_SKIP_SUBTREES; } if (tree->OperGet() == GT_RET_EXPR) { SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData); walker->StoreRetExprAsLocalVar(pTree); } return Compiler::WALK_CONTINUE; } void StoreRetExprAsLocalVar(GenTree** pRetExpr) { GenTree* retExpr = *pRetExpr; assert(retExpr->OperGet() == GT_RET_EXPR); const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr")); JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp); comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE); *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet()); if (retExpr->TypeGet() == TYP_REF) { assert(comp->lvaTable[tmp].lvSingleDef == 0); comp->lvaTable[tmp].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE retClsHnd = comp->gtGetClassHandle(retExpr, &isExact, &isNonNull); if (retClsHnd != nullptr) { comp->lvaSetClass(tmp, retClsHnd, isExact); } } } private: Compiler* comp; }; //------------------------------------------------------------------------ // addFatPointerCandidate: mark the call and the method, that they have a fat pointer candidate. // Spill ret_expr in the call node, because they can't be cloned. // // Arguments: // call - fat calli candidate // void Compiler::addFatPointerCandidate(GenTreeCall* call) { JITDUMP("Marking call [%06u] as fat pointer candidate\n", dspTreeID(call)); setMethodHasFatPointer(); call->SetFatPointerCandidate(); SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); } //------------------------------------------------------------------------ // considerGuardedDevirtualization: see if we can profitably guess at the // class involved in an interface or virtual call. // // Arguments: // // call - potential guarded devirtualization candidate // ilOffset - IL ofset of the call instruction // isInterface - true if this is an interface call // baseMethod - target method of the call // baseClass - class that introduced the target method // pContextHandle - context handle for the call // objClass - class of 'this' in the call // objClassName - name of the obj Class // // Notes: // Consults with VM to see if there's a likely class at runtime, // if so, adds a candidate for guarded devirtualization. // void Compiler::considerGuardedDevirtualization( GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)) { #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; #endif JITDUMP("Considering guarded devirtualization at IL offset %u (0x%x)\n", ilOffset, ilOffset); // We currently only get likely class guesses when there is PGO data // with class profiles. // if (fgPgoClassProfiles == 0) { JITDUMP("Not guessing for class: no class profile pgo data, or pgo disabled\n"); return; } // See if there's a likely guess for the class. // const unsigned likelihoodThreshold = isInterface ? 25 : 30; unsigned likelihood = 0; unsigned numberOfClasses = 0; CORINFO_CLASS_HANDLE likelyClass = NO_CLASS_HANDLE; bool doRandomDevirt = false; const int maxLikelyClasses = 32; LikelyClassRecord likelyClasses[maxLikelyClasses]; #ifdef DEBUG // Optional stress mode to pick a random known class, rather than // the most likely known class. 
// doRandomDevirt = JitConfig.JitRandomGuardedDevirtualization() != 0; if (doRandomDevirt) { // Reuse the random inliner's random state. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomGuardedDevirtualization()); likelyClasses[0].clsHandle = getRandomClass(fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset, random); likelyClasses[0].likelihood = 100; if (likelyClasses[0].clsHandle != NO_CLASS_HANDLE) { numberOfClasses = 1; } } else #endif { numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); } // For now we only use the most popular type likelihood = likelyClasses[0].likelihood; likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses < 1) { JITDUMP("No likely class, sorry\n"); return; } assert(likelyClass != NO_CLASS_HANDLE); // Print all likely classes JITDUMP("%s classes for %p (%s):\n", doRandomDevirt ? "Random" : "Likely", dspPtr(objClass), objClassName) for (UINT32 i = 0; i < numberOfClasses; i++) { JITDUMP(" %u) %p (%s) [likelihood:%u%%]\n", i + 1, likelyClasses[i].clsHandle, eeGetClassName(likelyClasses[i].clsHandle), likelyClasses[i].likelihood); } // Todo: a more advanced heuristic using likelihood, number of // classes, and the profile count for this block. // // For now we will guess if the likelihood is at least 25%/30% (intfc/virt), as studies // have shown this transformation should pay off even if we guess wrong sometimes. // if (likelihood < likelihoodThreshold) { JITDUMP("Not guessing for class; likelihood is below %s call threshold %u\n", callKind, likelihoodThreshold); return; } uint32_t const likelyClassAttribs = info.compCompHnd->getClassAttribs(likelyClass); if ((likelyClassAttribs & CORINFO_FLG_ABSTRACT) != 0) { // We may see an abstract likely class, if we have a stale profile. // No point guessing for this. // JITDUMP("Not guessing for class; abstract (stale profile)\n"); return; } // Figure out which method will be called. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = likelyClass; dvInfo.context = *pContextHandle; dvInfo.exactContext = *pContextHandle; dvInfo.pResolvedTokenVirtualMethod = nullptr; const bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo); if (!canResolve) { JITDUMP("Can't figure out which method would be invoked, sorry\n"); return; } CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod; JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr)); // Add this as a potential candidate. // uint32_t const likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod); addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs, likelihood); } //------------------------------------------------------------------------ // addGuardedDevirtualizationCandidate: potentially mark the call as a guarded // devirtualization candidate // // Notes: // // Call sites in rare or unoptimized code, and calls that require cookies are // not marked as candidates. // // As part of marking the candidate, the code spills GT_RET_EXPRs anywhere in any // child tree, because and we need to clone all these trees when we clone the call // as part of guarded devirtualization, and these IR nodes can't be cloned. 
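//
// Rough shape of what later phases expand a marked candidate into (an illustrative
// sketch, not the literal transformation performed here):
//
//   if (obj->methodTable == guardedClassHandle)
//       GuardedClass::guardedMethod(obj);  // direct call; may become an inline candidate
//   else
//       obj->virtualMethod();              // original virtual/interface dispatch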
// // Arguments: // call - potential guarded devirtualization candidate // methodHandle - method that will be invoked if the class test succeeds // classHandle - class that will be tested for at runtime // methodAttr - attributes of the method // classAttr - attributes of the class // likelihood - odds that this class is the class seen at runtime // void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood) { // This transformation only makes sense for virtual calls assert(call->IsVirtual()); // Only mark calls if the feature is enabled. const bool isEnabled = JitConfig.JitEnableGuardedDevirtualization() > 0; if (!isEnabled) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- disabled by jit config\n", dspTreeID(call)); return; } // Bail if not optimizing or the call site is very likely cold if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n", dspTreeID(call)); return; } // CT_INDIRECT calls may use the cookie, bail if so... // // If transforming these provides a benefit, we could save this off in the same way // we save the stub address below. if ((call->gtCallType == CT_INDIRECT) && (call->AsCall()->gtCallCookie != nullptr)) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- CT_INDIRECT with cookie\n", dspTreeID(call)); return; } #ifdef DEBUG // See if disabled by range // static ConfigMethodRange JitGuardedDevirtualizationRange; JitGuardedDevirtualizationRange.EnsureInit(JitConfig.JitGuardedDevirtualizationRange()); assert(!JitGuardedDevirtualizationRange.Error()); if (!JitGuardedDevirtualizationRange.Contains(impInlineRoot()->info.compMethodHash())) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- excluded by " "JitGuardedDevirtualizationRange", dspTreeID(call)); return; } #endif // We're all set, proceed with candidate creation. // JITDUMP("Marking call [%06u] as guarded devirtualization candidate; will guess for class %s\n", dspTreeID(call), eeGetClassName(classHandle)); setMethodHasGuardedDevirtualization(); call->SetGuardedDevirtualizationCandidate(); // Spill off any GT_RET_EXPR subtrees so we can clone the call. // SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); // Gather some information for later. Note we actually allocate InlineCandidateInfo // here, as the devirtualized half of this call will likely become an inline candidate. // GuardedDevirtualizationCandidateInfo* pInfo = new (this, CMK_Inlining) InlineCandidateInfo; pInfo->guardedMethodHandle = methodHandle; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->guardedClassHandle = classHandle; pInfo->likelihood = likelihood; pInfo->requiresInstMethodTableArg = false; // If the guarded class is a value class, look for an unboxed entry point. // if ((classAttr & CORINFO_FLG_VALUECLASS) != 0) { JITDUMP(" ... class is a value class, looking for unboxed entry\n"); bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethodHandle = info.compCompHnd->getUnboxedEntry(methodHandle, &requiresInstMethodTableArg); if (unboxedEntryMethodHandle != nullptr) { JITDUMP(" ... 
updating GDV candidate with unboxed entry info\n"); pInfo->guardedMethodUnboxedEntryHandle = unboxedEntryMethodHandle; pInfo->requiresInstMethodTableArg = requiresInstMethodTableArg; } } call->gtGuardedDevirtualizationCandidateInfo = pInfo; } void Compiler::addExpRuntimeLookupCandidate(GenTreeCall* call) { setMethodHasExpRuntimeLookup(); call->SetExpRuntimeLookup(); } //------------------------------------------------------------------------ // impIsClassExact: check if a class handle can only describe values // of exactly one class. // // Arguments: // classHnd - handle for class in question // // Returns: // true if class is final and not subject to special casting from // variance or similar. // // Note: // We are conservative on arrays of primitive types here. bool Compiler::impIsClassExact(CORINFO_CLASS_HANDLE classHnd) { DWORD flags = info.compCompHnd->getClassAttribs(classHnd); DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY; if ((flags & flagsMask) == CORINFO_FLG_FINAL) { return true; } if ((flags & flagsMask) == (CORINFO_FLG_FINAL | CORINFO_FLG_ARRAY)) { CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType type = info.compCompHnd->getChildType(classHnd, &arrayElementHandle); if ((type == CORINFO_TYPE_CLASS) || (type == CORINFO_TYPE_VALUECLASS)) { return impIsClassExact(arrayElementHandle); } } return false; } //------------------------------------------------------------------------ // impCanSkipCovariantStoreCheck: see if storing a ref type value to an array // can skip the array store covariance check. // // Arguments: // value -- tree producing the value to store // array -- tree representing the array to store to // // Returns: // true if the store does not require a covariance check. // bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array) { // We should only call this when optimizing. assert(opts.OptimizationEnabled()); // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j] if (value->OperIs(GT_INDEX) && array->OperIs(GT_LCL_VAR)) { GenTree* valueIndex = value->AsIndex()->Arr(); if (valueIndex->OperIs(GT_LCL_VAR)) { unsigned valueLcl = valueIndex->AsLclVar()->GetLclNum(); unsigned arrayLcl = array->AsLclVar()->GetLclNum(); if ((valueLcl == arrayLcl) && !lvaGetDesc(arrayLcl)->IsAddressExposed()) { JITDUMP("\nstelem of ref from same array: skipping covariant store check\n"); return true; } } } // Check for assignment of NULL. if (value->OperIs(GT_CNS_INT)) { assert(value->gtType == TYP_REF); if (value->AsIntCon()->gtIconVal == 0) { JITDUMP("\nstelem of null: skipping covariant store check\n"); return true; } // Non-0 const refs can only occur with frozen objects assert(value->IsIconHandle(GTF_ICON_STR_HDL)); assert(doesMethodHaveFrozenString() || (compIsForInlining() && impInlineInfo->InlinerCompiler->doesMethodHaveFrozenString())); } // Try and get a class handle for the array if (value->gtType != TYP_REF) { return false; } bool arrayIsExact = false; bool arrayIsNonNull = false; CORINFO_CLASS_HANDLE arrayHandle = gtGetClassHandle(array, &arrayIsExact, &arrayIsNonNull); if (arrayHandle == NO_CLASS_HANDLE) { return false; } // There are some methods in corelib where we're storing to an array but the IL // doesn't reflect this (see SZArrayHelper). Avoid. 
DWORD attribs = info.compCompHnd->getClassAttribs(arrayHandle); if ((attribs & CORINFO_FLG_ARRAY) == 0) { return false; } CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayHandle, &arrayElementHandle); // Verify array type handle is really an array of ref type assert(arrayElemType == CORINFO_TYPE_CLASS); // Check for exactly object[] if (arrayIsExact && (arrayElementHandle == impGetObjectClass())) { JITDUMP("\nstelem to (exact) object[]: skipping covariant store check\n"); return true; } const bool arrayTypeIsSealed = impIsClassExact(arrayElementHandle); if ((!arrayIsExact && !arrayTypeIsSealed) || (arrayElementHandle == NO_CLASS_HANDLE)) { // Bail out if we don't know array's exact type return false; } bool valueIsExact = false; bool valueIsNonNull = false; CORINFO_CLASS_HANDLE valueHandle = gtGetClassHandle(value, &valueIsExact, &valueIsNonNull); // Array's type is sealed and equals to value's type if (arrayTypeIsSealed && (valueHandle == arrayElementHandle)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } // Array's type is not sealed but we know its exact type if (arrayIsExact && (valueHandle != NO_CLASS_HANDLE) && (info.compCompHnd->compareTypesForCast(valueHandle, arrayElementHandle) == TypeCompareState::Must)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } return false; }
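// Illustrative summary of the cases handled above: a store of null, a store into an
// array known to be exactly object[], or a store whose value class provably matches
// a sealed element type can skip the covariant store check; storing an arbitrary
// object into a Base[] that might really be a Derived[] at runtime cannot.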
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Importer XX XX XX XX Imports the given method and converts it to semantic trees XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "corexcep.h" #define Verify(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ } \ } while (0) #define VerifyOrReturn(cond, msg) \ do \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return; \ } \ } while (0) #define VerifyOrReturnSpeculative(cond, msg, speculative) \ do \ { \ if (speculative) \ { \ if (!(cond)) \ { \ return false; \ } \ } \ else \ { \ if (!(cond)) \ { \ verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \ return false; \ } \ } \ } while (0) /*****************************************************************************/ void Compiler::impInit() { impStmtList = impLastStmt = nullptr; #ifdef DEBUG impInlinedCodeSize = 0; #endif // DEBUG } /***************************************************************************** * * Pushes the given tree on the stack. */ void Compiler::impPushOnStack(GenTree* tree, typeInfo ti) { /* Check for overflow. If inlining, we may be using a bigger stack */ if ((verCurrentState.esStackDepth >= info.compMaxStack) && (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0))) { BADCODE("stack overflow"); } #ifdef DEBUG // If we are pushing a struct, make certain we know the precise type! 
if (tree->TypeGet() == TYP_STRUCT) { assert(ti.IsType(TI_STRUCT)); CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle(); assert(clsHnd != NO_CLASS_HANDLE); } #endif // DEBUG verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti; verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree; if ((tree->gtType == TYP_LONG) && (compLongUsed == false)) { compLongUsed = true; } else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false)) { compFloatingPointUsed = true; } } inline void Compiler::impPushNullObjRefOnStack() { impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL)); } // This method gets called when we run into unverifiable code // (and we are verifying the method) inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { #ifdef DEBUG const char* tail = strrchr(file, '\\'); if (tail) { file = tail + 1; } if (JitConfig.JitBreakOnUnsafeCode()) { assert(!"Unsafe code detected"); } #endif JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); if (compIsForImportOnly()) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line)); } } inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file) DEBUGARG(unsigned line)) { JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line, msg, info.compFullName, impCurOpcName, impCurOpcOffs)); #ifdef DEBUG // BreakIfDebuggerPresent(); if (getBreakOnBadCode()) { assert(!"Typechecking error"); } #endif RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr); UNREACHABLE(); } // helper function that will tell us if the IL instruction at the addr passed // by param consumes an address at the top of the stack. We use it to save // us lvAddrTaken bool Compiler::impILConsumesAddr(const BYTE* codeAddr) { assert(!compIsForInlining()); OPCODE opcode; opcode = (OPCODE)getU1LittleEndian(codeAddr); switch (opcode) { // case CEE_LDFLDA: We're taking this one out as if you have a sequence // like // // ldloca.0 // ldflda whatever // // of a primitivelike struct, you end up after morphing with addr of a local // that's not marked as addrtaken, which is wrong. Also ldflda is usually used // for structs that contain other structs, which isnt a case we handle very // well now for other reasons. case CEE_LDFLD: { // We won't collapse small fields. This is probably not the right place to have this // check, but we're only using the function for this purpose, and is easy to factor // out if we need to do so. 
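// Illustrative example (hypothetical field): for
//
//   ldloca.0
//   ldfld int64 SomeStruct::m_value
//
// the ldfld consumes the address directly, so the local need not be marked
// address-taken; if the field were a small type (e.g. int8) we conservatively
// return false below.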
CORINFO_RESOLVED_TOKEN resolvedToken; impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field); var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField)); // Preserve 'small' int types if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } if (varTypeIsSmall(lclTyp)) { return false; } return true; } default: break; } return false; } void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind) { pResolvedToken->tokenContext = impTokenLookupContextHandle; pResolvedToken->tokenScope = info.compScopeHnd; pResolvedToken->token = getU4LittleEndian(addr); pResolvedToken->tokenType = kind; info.compCompHnd->resolveToken(pResolvedToken); } /***************************************************************************** * * Pop one tree from the stack. */ StackEntry Compiler::impPopStack() { if (verCurrentState.esStackDepth == 0) { BADCODE("stack underflow"); } return verCurrentState.esStack[--verCurrentState.esStackDepth]; } /***************************************************************************** * * Peep at n'th (0-based) tree on the top of the stack. */ StackEntry& Compiler::impStackTop(unsigned n) { if (verCurrentState.esStackDepth <= n) { BADCODE("stack underflow"); } return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1]; } unsigned Compiler::impStackHeight() { return verCurrentState.esStackDepth; } /***************************************************************************** * Some of the trees are spilled specially. While unspilling them, or * making a copy, these need to be handled specially. The function * enumerates the operators possible after spilling. */ #ifdef DEBUG // only used in asserts static bool impValidSpilledStackEntry(GenTree* tree) { if (tree->gtOper == GT_LCL_VAR) { return true; } if (tree->OperIsConst()) { return true; } return false; } #endif /***************************************************************************** * * The following logic is used to save/restore stack contents. * If 'copy' is true, then we make a copy of the trees on the stack. These * have to all be cloneable/spilled values. 
*/ void Compiler::impSaveStackState(SavedStack* savePtr, bool copy) { savePtr->ssDepth = verCurrentState.esStackDepth; if (verCurrentState.esStackDepth) { savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth]; size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees); if (copy) { StackEntry* table = savePtr->ssTrees; /* Make a fresh copy of all the stack entries */ for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++) { table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo; GenTree* tree = verCurrentState.esStack[level].val; assert(impValidSpilledStackEntry(tree)); switch (tree->gtOper) { case GT_CNS_INT: case GT_CNS_LNG: case GT_CNS_DBL: case GT_CNS_STR: case GT_LCL_VAR: table->val = gtCloneExpr(tree); break; default: assert(!"Bad oper - Not covered by impValidSpilledStackEntry()"); break; } } } else { memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize); } } } void Compiler::impRestoreStackState(SavedStack* savePtr) { verCurrentState.esStackDepth = savePtr->ssDepth; if (verCurrentState.esStackDepth) { memcpy(verCurrentState.esStack, savePtr->ssTrees, verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack)); } } //------------------------------------------------------------------------ // impBeginTreeList: Get the tree list started for a new basic block. // inline void Compiler::impBeginTreeList() { assert(impStmtList == nullptr && impLastStmt == nullptr); } /***************************************************************************** * * Store the given start and end stmt in the given basic block. This is * mostly called by impEndTreeList(BasicBlock *block). It is called * directly only for handling CEE_LEAVEs out of finally-protected try's. */ inline void Compiler::impEndTreeList(BasicBlock* block, Statement* firstStmt, Statement* lastStmt) { /* Make the list circular, so that we can easily walk it backwards */ firstStmt->SetPrevStmt(lastStmt); /* Store the tree list in the basic block */ block->bbStmtList = firstStmt; /* The block should not already be marked as imported */ assert((block->bbFlags & BBF_IMPORTED) == 0); block->bbFlags |= BBF_IMPORTED; } inline void Compiler::impEndTreeList(BasicBlock* block) { if (impStmtList == nullptr) { // The block should not already be marked as imported. assert((block->bbFlags & BBF_IMPORTED) == 0); // Empty block. Just mark it as imported. block->bbFlags |= BBF_IMPORTED; } else { impEndTreeList(block, impStmtList, impLastStmt); } #ifdef DEBUG if (impLastILoffsStmt != nullptr) { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } #endif impStmtList = impLastStmt = nullptr; } /***************************************************************************** * * Check that storing the given tree doesnt mess up the semantic order. Note * that this has only limited value as we can only check [0..chkLevel). 
*/ inline void Compiler::impAppendStmtCheck(Statement* stmt, unsigned chkLevel) { #ifndef DEBUG return; #else if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE) { return; } GenTree* tree = stmt->GetRootNode(); // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack if (tree->gtFlags & GTF_CALL) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0); } } if (tree->gtOper == GT_ASG) { // For an assignment to a local variable, all references of that // variable have to be spilled. If it is aliased, all calls and // indirect accesses have to be spilled if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) { unsigned lclNum = tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); for (unsigned level = 0; level < chkLevel; level++) { assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum)); assert(!lvaTable[lclNum].IsAddressExposed() || (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0); } } // If the access may be to global memory, all side effects have to be spilled. else if (tree->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) { for (unsigned level = 0; level < chkLevel; level++) { assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0); } } } #endif } //------------------------------------------------------------------------ // impAppendStmt: Append the given statement to the current block's tree list. // // // Arguments: // stmt - The statement to add. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // void Compiler::impAppendStmt(Statement* stmt, unsigned chkLevel, bool checkConsumedDebugInfo) { if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } if ((chkLevel != 0) && (chkLevel != (unsigned)CHECK_SPILL_NONE)) { assert(chkLevel <= verCurrentState.esStackDepth); /* If the statement being appended has any side-effects, check the stack to see if anything needs to be spilled to preserve correct ordering. */ GenTree* expr = stmt->GetRootNode(); GenTreeFlags flags = expr->gtFlags & GTF_GLOB_EFFECT; // Assignment to (unaliased) locals don't count as a side-effect as // we handle them specially using impSpillLclRefs(). Temp locals should // be fine too. 
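// For example (illustrative): appending `tmpN = locA + locB` drops the GTF_ASG bit
// in the check below, so the pending stack entries do not need to be spilled for it;
// a call or an indirect store on the right-hand side would still force spilling.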
if ((expr->gtOper == GT_ASG) && (expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && ((expr->AsOp()->gtOp1->gtFlags & GTF_GLOB_REF) == 0) && !gtHasLocalsWithAddrOp(expr->AsOp()->gtOp2)) { GenTreeFlags op2Flags = expr->AsOp()->gtOp2->gtFlags & GTF_GLOB_EFFECT; assert(flags == (op2Flags | GTF_ASG)); flags = op2Flags; } if (flags != 0) { bool spillGlobEffects = false; if ((flags & GTF_CALL) != 0) { // If there is a call, we have to spill global refs spillGlobEffects = true; } else if (!expr->OperIs(GT_ASG)) { if ((flags & GTF_ASG) != 0) { // The expression is not an assignment node but it has an assignment side effect, it // must be an atomic op, HW intrinsic or some other kind of node that stores to memory. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } } else { GenTree* lhs = expr->gtGetOp1(); GenTree* rhs = expr->gtGetOp2(); if (((rhs->gtFlags | lhs->gtFlags) & GTF_ASG) != 0) { // Either side of the assignment node has an assignment side effect. // Since we don't know what it assigns to, we need to spill global refs. spillGlobEffects = true; } else if ((lhs->gtFlags & GTF_GLOB_REF) != 0) { spillGlobEffects = true; } } impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt")); } else { impSpillSpecialSideEff(); } } impAppendStmtCheck(stmt, chkLevel); impAppendStmt(stmt); #ifdef FEATURE_SIMD impMarkContiguousSIMDFieldAssignments(stmt); #endif // Once we set the current offset as debug info in an appended tree, we are // ready to report the following offsets. Note that we need to compare // offsets here instead of debug info, since we do not set the "is call" // bit in impCurStmtDI. if (checkConsumedDebugInfo && (impLastStmt->GetDebugInfo().GetLocation().GetOffset() == impCurStmtDI.GetLocation().GetOffset())) { impCurStmtOffsSet(BAD_IL_OFFSET); } #ifdef DEBUG if (impLastILoffsStmt == nullptr) { impLastILoffsStmt = stmt; } if (verbose) { printf("\n\n"); gtDispStmt(stmt); } #endif } //------------------------------------------------------------------------ // impAppendStmt: Add the statement to the current stmts list. // // Arguments: // stmt - the statement to add. // inline void Compiler::impAppendStmt(Statement* stmt) { if (impStmtList == nullptr) { // The stmt is the first in the list. impStmtList = stmt; } else { // Append the expression statement to the existing list. impLastStmt->SetNextStmt(stmt); stmt->SetPrevStmt(impLastStmt); } impLastStmt = stmt; } //------------------------------------------------------------------------ // impExtractLastStmt: Extract the last statement from the current stmts list. // // Return Value: // The extracted statement. // // Notes: // It assumes that the stmt will be reinserted later. // Statement* Compiler::impExtractLastStmt() { assert(impLastStmt != nullptr); Statement* stmt = impLastStmt; impLastStmt = impLastStmt->GetPrevStmt(); if (impLastStmt == nullptr) { impStmtList = nullptr; } return stmt; } //------------------------------------------------------------------------- // impInsertStmtBefore: Insert the given "stmt" before "stmtBefore". // // Arguments: // stmt - a statement to insert; // stmtBefore - an insertion point to insert "stmt" before. 
// inline void Compiler::impInsertStmtBefore(Statement* stmt, Statement* stmtBefore) { assert(stmt != nullptr); assert(stmtBefore != nullptr); if (stmtBefore == impStmtList) { impStmtList = stmt; } else { Statement* stmtPrev = stmtBefore->GetPrevStmt(); stmt->SetPrevStmt(stmtPrev); stmtPrev->SetNextStmt(stmt); } stmt->SetNextStmt(stmtBefore); stmtBefore->SetPrevStmt(stmt); } //------------------------------------------------------------------------ // impAppendTree: Append the given expression tree to the current block's tree list. // // // Arguments: // tree - The tree that will be the root of the newly created statement. // chkLevel - [0..chkLevel) is the portion of the stack which we will check // for interference with stmt and spill if needed. // di - Debug information to associate with the statement. // checkConsumedDebugInfo - Whether to check for consumption of impCurStmtDI. impCurStmtDI // marks the debug info of the current boundary and is set when we // start importing IL at that boundary. If this parameter is true, // then the function checks if 'stmt' has been associated with the // current boundary, and if so, clears it so that we do not attach // it to more upcoming statements. // // Return value: // The newly created statement. // Statement* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, const DebugInfo& di, bool checkConsumedDebugInfo) { assert(tree); /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impAppendStmt(stmt, chkLevel, checkConsumedDebugInfo); return stmt; } /***************************************************************************** * * Insert the given expression tree before "stmtBefore" */ void Compiler::impInsertTreeBefore(GenTree* tree, const DebugInfo& di, Statement* stmtBefore) { /* Allocate an 'expression statement' node */ Statement* stmt = gtNewStmt(tree, di); /* Append the statement to the current block's stmt list */ impInsertStmtBefore(stmt, stmtBefore); } /***************************************************************************** * * Append an assignment of the given value to a temp to the current tree list. * curLevel is the stack level for which the spill to the temp is being done. */ void Compiler::impAssignTempGen(unsigned tmp, GenTree* val, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg = gtNewTempAssign(tmp, val); if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * same as above, but handle the valueclass case too */ void Compiler::impAssignTempGen(unsigned tmpNum, GenTree* val, CORINFO_CLASS_HANDLE structType, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* asg; assert(val->TypeGet() != TYP_STRUCT || structType != NO_CLASS_HANDLE); if (varTypeIsStruct(val) && (structType != NO_CLASS_HANDLE)) { assert(tmpNum < lvaCount); assert(structType != NO_CLASS_HANDLE); // if the method is non-verifiable the assert is not true // so at least ignore it in the case when verification is turned on // since any block that tries to use the temp would have failed verification. 
var_types varType = lvaTable[tmpNum].lvType; assert(varType == TYP_UNDEF || varTypeIsStruct(varType)); lvaSetStruct(tmpNum, structType, false); varType = lvaTable[tmpNum].lvType; // Now, set the type of the struct value. Note that lvaSetStruct may modify the type // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType) // that has been passed in for the value being assigned to the temp, in which case we // need to set 'val' to that same type. // Note also that if we always normalized the types of any node that might be a struct // type, this would not be necessary - but that requires additional JIT/EE interface // calls that may not actually be required - e.g. if we only access a field of a struct. GenTree* dst = gtNewLclvNode(tmpNum, varType); asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, di, block); } else { asg = gtNewTempAssign(tmpNum, val); } if (!asg->IsNothingNode()) { if (pAfterStmt) { Statement* asgStmt = gtNewStmt(asg, di); fgInsertStmtAfter(block, *pAfterStmt, asgStmt); *pAfterStmt = asgStmt; } else { impAppendTree(asg, curLevel, impCurStmtDI); } } } /***************************************************************************** * * Pop the given number of values from the stack and return a list node with * their values. * The 'prefixTree' argument may optionally contain an argument * list that is prepended to the list returned from this function. * * The notion of prepended is a bit misleading in that the list is backwards * from the way I would expect: The first element popped is at the end of * the returned list, and prefixTree is 'before' that, meaning closer to * the end of the list. To get to prefixTree, you have to walk to the * end of the list. * * For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as * such we reverse its meaning such that returnValue has a reversed * prefixTree at the head of the list. */ GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig, GenTreeCall::Use* prefixArgs) { assert(sig == nullptr || count == sig->numArgs); CORINFO_CLASS_HANDLE structType; GenTreeCall::Use* argList; if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { argList = nullptr; } else { // ARG_ORDER_L2R argList = prefixArgs; } while (count--) { StackEntry se = impPopStack(); typeInfo ti = se.seTypeInfo; GenTree* temp = se.val; if (varTypeIsStruct(temp)) { // Morph trees that aren't already OBJs or MKREFANY to be OBJs assert(ti.IsType(TI_STRUCT)); structType = ti.GetClassHandleForValueClass(); bool forceNormalization = false; if (varTypeIsSIMD(temp)) { // We need to ensure that fgMorphArgs will use the correct struct handle to ensure proper // ABI handling of this argument. // Note that this can happen, for example, if we have a SIMD intrinsic that returns a SIMD type // with a different baseType than we've seen. // We also need to ensure an OBJ node if we have a FIELD node that might be transformed to LCL_FLD // or a plain GT_IND. // TODO-Cleanup: Consider whether we can eliminate all of these cases. 
                if ((gtGetStructHandleIfPresent(temp) != structType) || temp->OperIs(GT_FIELD))
                {
                    forceNormalization = true;
                }
            }
#ifdef DEBUG
            if (verbose)
            {
                printf("Calling impNormStructVal on:\n");
                gtDispTree(temp);
            }
#endif
            temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL, forceNormalization);
#ifdef DEBUG
            if (verbose)
            {
                printf("resulting tree:\n");
                gtDispTree(temp);
            }
#endif
        }

        /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
        argList = gtPrependNewCallArg(temp, argList);
    }

    if (sig != nullptr)
    {
        if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
            sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
            sig->retType != CORINFO_TYPE_VAR)
        {
            // Make sure that all valuetypes (including enums) that we push are loaded.
            // This is to guarantee that if a GC is triggered from the prestub of this method,
            // all valuetypes in the method signature are already loaded.
            // We need to be able to find the size of the valuetypes, but we cannot
            // do a class-load from within GC.
            info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
        }

        CORINFO_ARG_LIST_HANDLE sigArgs = sig->args;
        GenTreeCall::Use*       arg;

        for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--)
        {
            PREFIX_ASSUME(arg != nullptr);

            CORINFO_CLASS_HANDLE classHnd;
            CorInfoType          corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd));

            var_types jitSigType = JITtype2varType(corType);

            if (!impCheckImplicitArgumentCoercion(jitSigType, arg->GetNode()->TypeGet()))
            {
                BADCODE("the call argument has a type that can't be implicitly converted to the signature type");
            }

            // insert implied casts (from float to double or double to float)
            if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT))
            {
                arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE));
            }
            else if ((jitSigType == TYP_FLOAT) && (arg->GetNode()->TypeGet() == TYP_DOUBLE))
            {
                arg->SetNode(gtNewCastNode(TYP_FLOAT, arg->GetNode(), false, TYP_FLOAT));
            }

            // insert any widening or narrowing casts for backwards compatibility
            arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType));

            if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
                corType != CORINFO_TYPE_VAR)
            {
                CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs);
                if (argRealClass != nullptr)
                {
                    // Make sure that all valuetypes (including enums) that we push are loaded.
                    // This is to guarantee that if a GC is triggered from the prestub of this method,
                    // all valuetypes in the method signature are already loaded.
                    // We need to be able to find the size of the valuetypes, but we cannot
                    // do a class-load from within GC.
                    info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
                }
            }

            const var_types nodeArgType = arg->GetNode()->TypeGet();
            if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType))
            {
                assert(!varTypeIsStruct(nodeArgType));
                // Some ABIs require precise size information for call arguments less than target pointer size,
                // for example arm64 OSX. Create a special node to keep this information until morph
                // consumes it into `fgArgInfo`.
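                // Illustrative sketch (added, assumed example): for a 'short' signature parameter
                // whose IL stack value is a TYP_INT node, the wrapper built below is
                //     PUTARG_TYPE<short>(argNode)
                // so morph can still recover the precise 2-byte signature size for fgArgInfo.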
GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode()); arg->SetNode(putArgType); } sigArgs = info.compCompHnd->getArgNext(sigArgs); } } if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) { // Prepend the prefixTree // Simple in-place reversal to place treeList // at the end of a reversed prefixTree while (prefixArgs != nullptr) { GenTreeCall::Use* next = prefixArgs->GetNext(); prefixArgs->SetNext(argList); argList = prefixArgs; prefixArgs = next; } } return argList; } static bool TypeIs(var_types type1, var_types type2) { return type1 == type2; } // Check if type1 matches any type from the list. template <typename... T> static bool TypeIs(var_types type1, var_types type2, T... rest) { return TypeIs(type1, type2) || TypeIs(type1, rest...); } //------------------------------------------------------------------------ // impCheckImplicitArgumentCoercion: check that the node's type is compatible with // the signature's type using ECMA implicit argument coercion table. // // Arguments: // sigType - the type in the call signature; // nodeType - the node type. // // Return Value: // true if they are compatible, false otherwise. // // Notes: // - it is currently allowing byref->long passing, should be fixed in VM; // - it can't check long -> native int case on 64-bit platforms, // so the behavior is different depending on the target bitness. // bool Compiler::impCheckImplicitArgumentCoercion(var_types sigType, var_types nodeType) const { if (sigType == nodeType) { return true; } if (TypeIs(sigType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT)) { if (TypeIs(nodeType, TYP_BOOL, TYP_UBYTE, TYP_BYTE, TYP_USHORT, TYP_SHORT, TYP_UINT, TYP_INT, TYP_I_IMPL)) { return true; } } else if (TypeIs(sigType, TYP_ULONG, TYP_LONG)) { if (TypeIs(nodeType, TYP_LONG)) { return true; } } else if (TypeIs(sigType, TYP_FLOAT, TYP_DOUBLE)) { if (TypeIs(nodeType, TYP_FLOAT, TYP_DOUBLE)) { return true; } } else if (TypeIs(sigType, TYP_BYREF)) { if (TypeIs(nodeType, TYP_I_IMPL)) { return true; } // This condition tolerates such IL: // ; V00 this ref this class-hnd // ldarg.0 // call(byref) if (TypeIs(nodeType, TYP_REF)) { return true; } } else if (varTypeIsStruct(sigType)) { if (varTypeIsStruct(nodeType)) { return true; } } // This condition should not be under `else` because `TYP_I_IMPL` // intersects with `TYP_LONG` or `TYP_INT`. if (TypeIs(sigType, TYP_I_IMPL, TYP_U_IMPL)) { // Note that it allows `ldc.i8 1; call(nint)` on 64-bit platforms, // but we can't distinguish `nint` from `long` there. if (TypeIs(nodeType, TYP_I_IMPL, TYP_U_IMPL, TYP_INT, TYP_UINT)) { return true; } // It tolerates IL that ECMA does not allow but that is commonly used. // Example: // V02 loc1 struct <RTL_OSVERSIONINFOEX, 32> // ldloca.s 0x2 // call(native int) if (TypeIs(nodeType, TYP_BYREF)) { return true; } } return false; } /***************************************************************************** * * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.) * The first "skipReverseCount" items are not reversed. 
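 *
 * Illustrative example (added): given a popped list n1->n2->n3->n4 and skipReverseCount == 1,
 * the first node stays in place and the tail is reversed in place, yielding n1->n4->n3->n2.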
*/ GenTreeCall::Use* Compiler::impPopReverseCallArgs(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount) { assert(skipReverseCount <= count); GenTreeCall::Use* list = impPopCallArgs(count, sig); // reverse the list if (list == nullptr || skipReverseCount == count) { return list; } GenTreeCall::Use* ptr = nullptr; // Initialized to the first node that needs to be reversed GenTreeCall::Use* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed if (skipReverseCount == 0) { ptr = list; } else { lastSkipNode = list; // Get to the first node that needs to be reversed for (unsigned i = 0; i < skipReverseCount - 1; i++) { lastSkipNode = lastSkipNode->GetNext(); } PREFIX_ASSUME(lastSkipNode != nullptr); ptr = lastSkipNode->GetNext(); } GenTreeCall::Use* reversedList = nullptr; do { GenTreeCall::Use* tmp = ptr->GetNext(); ptr->SetNext(reversedList); reversedList = ptr; ptr = tmp; } while (ptr != nullptr); if (skipReverseCount) { lastSkipNode->SetNext(reversedList); return list; } else { return reversedList; } } //------------------------------------------------------------------------ // impAssignStruct: Create a struct assignment // // Arguments: // dest - the destination of the assignment // src - the value to be assigned // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // ilOffset - il offset for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStruct(GenTree* dest, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = nullptr */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = nullptr */ ) { assert(varTypeIsStruct(dest)); DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } while (dest->gtOper == GT_COMMA) { // Second thing is the struct. assert(varTypeIsStruct(dest->AsOp()->gtOp2)); // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree. if (pAfterStmt) { Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(dest->AsOp()->gtOp1, curLevel, usedDI); // do the side effect } // set dest to the second thing dest = dest->AsOp()->gtOp2; } assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD || dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX); // Return a NOP if this is a self-assignment. if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR && src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum()) { return gtNewNothingNode(); } // TODO-1stClassStructs: Avoid creating an address if it is not needed, // or re-creating a Blk node if it is. GenTree* destAddr; if (dest->gtOper == GT_IND || dest->OperIsBlk()) { destAddr = dest->AsOp()->gtOp1; } else { destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest); } return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, usedDI, block)); } //------------------------------------------------------------------------ // impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'. 
// // Arguments: // destAddr - address of the destination of the assignment // src - source of the assignment // structHnd - handle representing the struct type // curLevel - stack level for which a spill may be being done // pAfterStmt - statement to insert any additional statements after // di - debug info for new statements // block - block to insert any additional statements in // // Return Value: // The tree that should be appended to the statement list that represents the assignment. // // Notes: // Temp assignments may be appended to impStmtList if spilling is necessary. GenTree* Compiler::impAssignStructPtr(GenTree* destAddr, GenTree* src, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt, /* = NULL */ const DebugInfo& di, /* = DebugInfo() */ BasicBlock* block /* = NULL */ ) { GenTree* dest = nullptr; GenTreeFlags destFlags = GTF_EMPTY; DebugInfo usedDI = di; if (!usedDI.IsValid()) { usedDI = impCurStmtDI; } #ifdef DEBUG #ifdef FEATURE_HW_INTRINSICS if (src->OperIs(GT_HWINTRINSIC)) { const GenTreeHWIntrinsic* intrinsic = src->AsHWIntrinsic(); if (HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId())) { assert(src->TypeGet() == TYP_STRUCT); } else { assert(varTypeIsSIMD(src)); } } else #endif // FEATURE_HW_INTRINSICS { assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR, GT_COMMA) || ((src->TypeGet() != TYP_STRUCT) && src->OperIsSIMD())); } #endif // DEBUG var_types asgType = src->TypeGet(); if (src->gtOper == GT_CALL) { GenTreeCall* srcCall = src->AsCall(); if (srcCall->TreatAsHasRetBufArg(this)) { // Case of call returning a struct via hidden retbuf arg CLANG_FORMAT_COMMENT_ANCHOR; #if !defined(TARGET_ARM) // Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter if ((TargetOS::IsWindows || compUnixX86Abi()) && srcCall->IsUnmanaged()) { if (callConvIsInstanceMethodCallConv(srcCall->GetUnmanagedCallConv())) { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the second-to-last node // so it will be pushed on to the stack after the user args but before the native this arg // as required by the native ABI. GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else if (srcCall->GetUnmanagedCallConv() == CorInfoCallConvExtension::Thiscall) { // For thiscall, the "this" parameter is not included in the argument list reversal, // so we need to put the return buffer as the last parameter. for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } else if (lastArg->GetNext() == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, lastArg); } else { assert(lastArg != nullptr && lastArg->GetNext() != nullptr); GenTreeCall::Use* secondLastArg = lastArg; lastArg = lastArg->GetNext(); for (; lastArg->GetNext() != nullptr; secondLastArg = lastArg, lastArg = lastArg->GetNext()) ; assert(secondLastArg->GetNext() != nullptr); gtInsertNewCallArgAfter(destAddr, secondLastArg); } #else GenTreeCall::Use* thisArg = gtInsertNewCallArgAfter(destAddr, srcCall->gtCallArgs); #endif } else { #ifdef TARGET_X86 // The argument list has already been reversed. // Insert the return buffer as the last node so it will be pushed on to the stack last // as required by the native ABI. 
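                    // Illustrative sketch (added example): an already-reversed list [argN, ..., arg1]
                    // becomes [argN, ..., arg1, retBufAddr] below, so the return buffer address is the
                    // last value pushed.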
GenTreeCall::Use* lastArg = srcCall->gtCallArgs; if (lastArg == nullptr) { srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } else { for (; lastArg->GetNext() != nullptr; lastArg = lastArg->GetNext()) ; gtInsertNewCallArgAfter(destAddr, lastArg); } #else // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); #endif } } else #endif // !defined(TARGET_ARM) { // insert the return value buffer into the argument list as first byref parameter srcCall->gtCallArgs = gtPrependNewCallArg(destAddr, srcCall->gtCallArgs); } // now returns void, not a struct src->gtType = TYP_VOID; // return the morphed call node return src; } else { // Case of call returning a struct in one or more registers. var_types returnType = (var_types)srcCall->gtReturnType; // First we try to change this to "LclVar/LclFld = call" // if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)) { // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD. // That is, the IR will be of the form lclVar = call for multi-reg return // GenTreeLclVar* lcl = destAddr->AsOp()->gtOp1->AsLclVar(); unsigned lclNum = lcl->GetLclNum(); LclVarDsc* varDsc = lvaGetDesc(lclNum); if (src->AsCall()->HasMultiRegRetVal()) { // Mark the struct LclVar as used in a MultiReg return context // which currently makes it non promotable. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; } dest = lcl; #if defined(TARGET_ARM) // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case, // but that method has not been updadted to include ARM. impMarkLclDstNotPromotable(lclNum, src, structHnd); lcl->gtFlags |= GTF_DONT_CSE; #elif defined(UNIX_AMD64_ABI) // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs. assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs."); // Make the struct non promotable. The eightbytes could contain multiple fields. // TODO-1stClassStructs: Eliminate this pessimization when we can more generally // handle multireg returns. // TODO-Cleanup: Why is this needed here? This seems that it will set this even for // non-multireg returns. lcl->gtFlags |= GTF_DONT_CSE; varDsc->lvIsMultiRegRet = true; #endif } else // we don't have a GT_ADDR of a GT_LCL_VAR { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier. asgType = returnType; destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->gtOper == GT_RET_EXPR) { GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall(); noway_assert(call->gtOper == GT_CALL); if (call->HasRetBufArg()) { // insert the return value buffer into the argument list as first byref parameter call->gtCallArgs = gtPrependNewCallArg(destAddr, call->gtCallArgs); // now returns void, not a struct src->gtType = TYP_VOID; call->gtType = TYP_VOID; // We already have appended the write to 'dest' GT_CALL's args // So now we just return an empty node (pruning the GT_RET_EXPR) return src; } else { // Case of inline method returning a struct in one or more registers. // We won't need a return buffer asgType = src->gtType; if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR)) { // !!! The destination could be on stack. !!! // This flag will let us choose the correct write barrier. 
destFlags = GTF_IND_TGTANYWHERE; } } } else if (src->OperIsBlk()) { asgType = impNormStructType(structHnd); if (src->gtOper == GT_OBJ) { assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd); } } else if (src->gtOper == GT_INDEX) { asgType = impNormStructType(structHnd); assert(src->AsIndex()->gtStructElemClass == structHnd); } else if (src->gtOper == GT_MKREFANY) { // Since we are assigning the result of a GT_MKREFANY, // "destAddr" must point to a refany. GenTree* destAddrClone; destAddr = impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment")); assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0); assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF); fgAddFieldSeqForZeroOffset(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField())); GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr); GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL); typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()); GenTree* typeSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset)); // append the assign of the pointer value GenTree* asg = gtNewAssignNode(ptrSlot, src->AsOp()->gtOp1); if (pAfterStmt) { Statement* newStmt = gtNewStmt(asg, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else { impAppendTree(asg, curLevel, usedDI); } // return the assign of the type value, to be appended return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2); } else if (src->gtOper == GT_COMMA) { // The second thing is the struct or its address. assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF); if (pAfterStmt) { // Insert op1 after '*pAfterStmt' Statement* newStmt = gtNewStmt(src->AsOp()->gtOp1, usedDI); fgInsertStmtAfter(block, *pAfterStmt, newStmt); *pAfterStmt = newStmt; } else if (impLastStmt != nullptr) { // Do the side-effect as a separate statement. impAppendTree(src->AsOp()->gtOp1, curLevel, usedDI); } else { // In this case we have neither been given a statement to insert after, nor are we // in the importer where we can append the side effect. // Instead, we're going to sink the assignment below the COMMA. src->AsOp()->gtOp2 = impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); return src; } // Evaluate the second thing using recursion. return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block); } else if (src->IsLocal()) { asgType = src->TypeGet(); } else if (asgType == TYP_STRUCT) { // It should already have the appropriate type. assert(asgType == impNormStructType(structHnd)); } if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR)) { GenTree* destNode = destAddr->gtGetOp1(); // If the actual destination is a local, a GT_INDEX or a block node, or is a node that // will be morphed, don't insert an OBJ(ADDR) if it already has the right type. if (destNode->OperIs(GT_LCL_VAR, GT_INDEX) || destNode->OperIsBlk()) { var_types destType = destNode->TypeGet(); // If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible // iff their handles are the same. // Otherwise, they are compatible if their types are the same. bool typesAreCompatible = ((destType == TYP_STRUCT) || (asgType == TYP_STRUCT)) ? 
((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType)) : (destType == asgType); if (typesAreCompatible) { dest = destNode; if (destType != TYP_STRUCT) { // Use a normalized type if available. We know from above that they're equivalent. asgType = destType; } } } } if (dest == nullptr) { if (asgType == TYP_STRUCT) { dest = gtNewObjNode(structHnd, destAddr); gtSetObjGcInfo(dest->AsObj()); // Although an obj as a call argument was always assumed to be a globRef // (which is itself overly conservative), that is not true of the operands // of a block assignment. dest->gtFlags &= ~GTF_GLOB_REF; dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF); } else { dest = gtNewOperNode(GT_IND, asgType, destAddr); } } if (dest->OperIs(GT_LCL_VAR) && (src->IsMultiRegNode() || (src->OperIs(GT_RET_EXPR) && src->AsRetExpr()->gtInlineCandidate->AsCall()->HasMultiRegRetVal()))) { if (lvaEnregMultiRegVars && varTypeIsStruct(dest)) { dest->AsLclVar()->SetMultiReg(); } if (src->OperIs(GT_CALL)) { lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true; } } dest->gtFlags |= destFlags; destFlags = dest->gtFlags; // return an assignment node, to be appended GenTree* asgNode = gtNewAssignNode(dest, src); gtBlockOpInit(asgNode, dest, src, false); // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs // of assignments. if ((destFlags & GTF_DONT_CSE) == 0) { dest->gtFlags &= ~(GTF_DONT_CSE); } return asgNode; } /***************************************************************************** Given a struct value, and the class handle for that structure, return the expression for the address for that structure value. willDeref - does the caller guarantee to dereference the pointer. */ GenTree* Compiler::impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref) { assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd)); var_types type = structVal->TypeGet(); genTreeOps oper = structVal->gtOper; if (oper == GT_OBJ && willDeref) { assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd); return (structVal->AsObj()->Addr()); } else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY || structVal->OperIsSimdOrHWintrinsic()) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The 'return value' is now the temp itself type = genActualType(lvaTable[tmpNum].TypeGet()); GenTree* temp = gtNewLclvNode(tmpNum, type); temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp); return temp; } else if (oper == GT_COMMA) { assert(structVal->AsOp()->gtOp2->gtType == type); // Second thing is the struct Statement* oldLastStmt = impLastStmt; structVal->AsOp()->gtOp2 = impGetStructAddr(structVal->AsOp()->gtOp2, structHnd, curLevel, willDeref); structVal->gtType = TYP_BYREF; if (oldLastStmt != impLastStmt) { // Some temp assignment statement was placed on the statement list // for Op2, but that would be out of order with op1, so we need to // spill op1 onto the statement list after whatever was last // before we recursed on Op2 (i.e. before whatever Op2 appended). Statement* beforeStmt; if (oldLastStmt == nullptr) { // The op1 stmt should be the first in the list. beforeStmt = impStmtList; } else { // Insert after the oldLastStmt before the first inserted for op2. 
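                // Illustrative sketch (added; S1/T1/T2 are hypothetical statements): if the list was
                // [S1] before the recursive call and [T1, T2] were appended for op2, op1 is inserted
                // before T1, giving [S1, op1, T1, T2] and preserving the evaluation order of
                // COMMA(op1, op2).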
beforeStmt = oldLastStmt->GetNextStmt(); } impInsertTreeBefore(structVal->AsOp()->gtOp1, impCurStmtDI, beforeStmt); structVal->AsOp()->gtOp1 = gtNewNothingNode(); } return (structVal); } return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } //------------------------------------------------------------------------ // impNormStructType: Normalize the type of a (known to be) struct class handle. // // Arguments: // structHnd - The class handle for the struct type of interest. // pSimdBaseJitType - (optional, default nullptr) - if non-null, and the struct is a SIMD // type, set to the SIMD base JIT type // // Return Value: // The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*). // It may also modify the compFloatingPointUsed flag if the type is a SIMD type. // // Notes: // Normalizing the type involves examining the struct type to determine if it should // be modified to one that is handled specially by the JIT, possibly being a candidate // for full enregistration, e.g. TYP_SIMD16. If the size of the struct is already known // call structSizeMightRepresentSIMDType to determine if this api needs to be called. var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, CorInfoType* pSimdBaseJitType) { assert(structHnd != NO_CLASS_HANDLE); var_types structType = TYP_STRUCT; #ifdef FEATURE_SIMD if (supportSIMDTypes()) { const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd); // Don't bother if the struct contains GC references of byrefs, it can't be a SIMD type. if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_BYREF_LIKE)) == 0) { unsigned originalSize = info.compCompHnd->getClassSize(structHnd); if (structSizeMightRepresentSIMDType(originalSize)) { unsigned int sizeBytes; CorInfoType simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(structHnd, &sizeBytes); if (simdBaseJitType != CORINFO_TYPE_UNDEF) { assert(sizeBytes == originalSize); structType = getSIMDTypeForSize(sizeBytes); if (pSimdBaseJitType != nullptr) { *pSimdBaseJitType = simdBaseJitType; } // Also indicate that we use floating point registers. compFloatingPointUsed = true; } } } } #endif // FEATURE_SIMD return structType; } //------------------------------------------------------------------------ // Compiler::impNormStructVal: Normalize a struct value // // Arguments: // structVal - the node we are going to normalize // structHnd - the class handle for the node // curLevel - the current stack level // forceNormalization - Force the creation of an OBJ node (default is false). // // Notes: // Given struct value 'structVal', make sure it is 'canonical', that is // it is either: // - a known struct type (non-TYP_STRUCT, e.g. TYP_SIMD8) // - an OBJ or a MKREFANY node, or // - a node (e.g. GT_INDEX) that will be morphed. // If the node is a CALL or RET_EXPR, a copy will be made to a new temp. // GenTree* Compiler::impNormStructVal(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool forceNormalization /*=false*/) { assert(forceNormalization || varTypeIsStruct(structVal)); assert(structHnd != NO_CLASS_HANDLE); var_types structType = structVal->TypeGet(); bool makeTemp = false; if (structType == TYP_STRUCT) { structType = impNormStructType(structHnd); } bool alreadyNormalized = false; GenTreeLclVarCommon* structLcl = nullptr; genTreeOps oper = structVal->OperGet(); switch (oper) { // GT_RETURN and GT_MKREFANY don't capture the handle. 
case GT_RETURN: break; case GT_MKREFANY: alreadyNormalized = true; break; case GT_CALL: structVal->AsCall()->gtRetClsHnd = structHnd; makeTemp = true; break; case GT_RET_EXPR: structVal->AsRetExpr()->gtRetClsHnd = structHnd; makeTemp = true; break; case GT_ARGPLACE: structVal->AsArgPlace()->gtArgPlaceClsHnd = structHnd; break; case GT_INDEX: // This will be transformed to an OBJ later. alreadyNormalized = true; structVal->AsIndex()->gtStructElemClass = structHnd; structVal->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(structHnd); break; case GT_FIELD: // Wrap it in a GT_OBJ, if needed. structVal->gtType = structType; if ((structType == TYP_STRUCT) || forceNormalization) { structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } break; case GT_LCL_VAR: case GT_LCL_FLD: structLcl = structVal->AsLclVarCommon(); // Wrap it in a GT_OBJ. structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); FALLTHROUGH; case GT_OBJ: case GT_BLK: case GT_ASG: // These should already have the appropriate type. assert(structVal->gtType == structType); alreadyNormalized = true; break; case GT_IND: assert(structVal->gtType == structType); structVal = gtNewObjNode(structHnd, structVal->gtGetOp1()); alreadyNormalized = true; break; #ifdef FEATURE_SIMD case GT_SIMD: assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType)); break; #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS case GT_HWINTRINSIC: assert(structVal->gtType == structType); assert(varTypeIsSIMD(structVal) || HWIntrinsicInfo::IsMultiReg(structVal->AsHWIntrinsic()->GetHWIntrinsicId())); break; #endif case GT_COMMA: { // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node. GenTree* blockNode = structVal->AsOp()->gtOp2; assert(blockNode->gtType == structType); // Is this GT_COMMA(op1, GT_COMMA())? GenTree* parent = structVal; if (blockNode->OperGet() == GT_COMMA) { // Find the last node in the comma chain. do { assert(blockNode->gtType == structType); parent = blockNode; blockNode = blockNode->AsOp()->gtOp2; } while (blockNode->OperGet() == GT_COMMA); } if (blockNode->OperGet() == GT_FIELD) { // If we have a GT_FIELD then wrap it in a GT_OBJ. blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode)); } #ifdef FEATURE_SIMD if (blockNode->OperIsSimdOrHWintrinsic()) { parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization); alreadyNormalized = true; } else #endif { noway_assert(blockNode->OperIsBlk()); // Sink the GT_COMMA below the blockNode addr. // That is GT_COMMA(op1, op2=blockNode) is tranformed into // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)). // // In case of a chained GT_COMMA case, we sink the last // GT_COMMA below the blockNode addr. 
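                // Illustrative sketch (added example) for the non-chained case:
                //     COMMA(sideEff, OBJ(addr))
                // becomes
                //     OBJ(COMMA<byref>(sideEff, addr))
                // and 'structVal' is updated to point at the OBJ node.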
GenTree* blockNodeAddr = blockNode->AsOp()->gtOp1; assert(blockNodeAddr->gtType == TYP_BYREF); GenTree* commaNode = parent; commaNode->gtType = TYP_BYREF; commaNode->AsOp()->gtOp2 = blockNodeAddr; blockNode->AsOp()->gtOp1 = commaNode; if (parent == structVal) { structVal = blockNode; } alreadyNormalized = true; } } break; default: noway_assert(!"Unexpected node in impNormStructVal()"); break; } structVal->gtType = structType; if (!alreadyNormalized || forceNormalization) { if (makeTemp) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj")); impAssignTempGen(tmpNum, structVal, structHnd, curLevel); // The structVal is now the temp itself structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon(); structVal = structLcl; } if ((forceNormalization || (structType == TYP_STRUCT)) && !structVal->OperIsBlk()) { // Wrap it in a GT_OBJ structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal)); } } if (structLcl != nullptr) { // A OBJ on a ADDR(LCL_VAR) can never raise an exception // so we don't set GTF_EXCEPT here. if (!lvaIsImplicitByRefLocal(structLcl->GetLclNum())) { structVal->gtFlags &= ~GTF_GLOB_REF; } } else if (structVal->OperIsBlk()) { // In general a OBJ is an indirection and could raise an exception. structVal->gtFlags |= GTF_EXCEPT; } return structVal; } /******************************************************************************/ // Given a type token, generate code that will evaluate to the correct // handle representation of that token (type handle, field handle, or method handle) // // For most cases, the handle is determined at compile-time, and the code // generated is simply an embedded handle. // // Run-time lookup is required if the enclosing method is shared between instantiations // and the token refers to formal type parameters whose instantiation is not known // at compile-time. // GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool* pRuntimeLookup /* = NULL */, bool mustRestoreHandle /* = false */, bool importParent /* = false */) { assert(!fgGlobalMorph); CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo); if (pRuntimeLookup) { *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup; } if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup) { switch (embedInfo.handleType) { case CORINFO_HANDLETYPE_CLASS: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle); break; case CORINFO_HANDLETYPE_METHOD: info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle); break; case CORINFO_HANDLETYPE_FIELD: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun( info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle)); break; default: break; } } // Generate the full lookup tree. May be null if we're abandoning an inline attempt. GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token), embedInfo.compileTimeHandle); // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node. 
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup) { result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result); } return result; } GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { if (!pLookup->lookupKind.needsRuntimeLookup) { // No runtime lookup is required. // Access is direct or memory-indirect (of a fixed address) reference CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE); if (pLookup->constLookup.accessType == IAT_VALUE) { handle = pLookup->constLookup.handle; } else if (pLookup->constLookup.accessType == IAT_PVALUE) { pIndirection = pLookup->constLookup.addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG size_t handleToTrack; if (handleFlags == GTF_ICON_TOKEN_HDL) { handleToTrack = 0; } else { handleToTrack = (size_t)compileTimeHandle; } if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = handleToTrack; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = handleToTrack; } #endif return addr; } if (pLookup->lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case assert(compIsForInlining()); compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP); return nullptr; } // Need to use dictionary-based access which depends on the typeContext // which is only available at runtime, not at compile-time. return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle); } #ifdef FEATURE_READYTORUN GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup, GenTreeFlags handleFlags, void* compileTimeHandle) { CORINFO_GENERIC_HANDLE handle = nullptr; void* pIndirection = nullptr; assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE); if (pLookup->accessType == IAT_VALUE) { handle = pLookup->handle; } else if (pLookup->accessType == IAT_PVALUE) { pIndirection = pLookup->addr; } GenTree* addr = gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle); #ifdef DEBUG assert((handleFlags == GTF_ICON_CLASS_HDL) || (handleFlags == GTF_ICON_METHOD_HDL)); if (handle != nullptr) { addr->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } else { addr->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)compileTimeHandle; } #endif // DEBUG return addr; } //------------------------------------------------------------------------ // impIsCastHelperEligibleForClassProbe: Checks whether a tree is a cast helper eligible to // to be profiled and then optimized with PGO data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper eligible to be profiled // bool Compiler::impIsCastHelperEligibleForClassProbe(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } 
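// Note (added, illustrative): IL such as 'isinst SomeClass' or 'castclass SomeClass' typically
// imports as a CT_HELPER call to one of the helpers checked above, which is what makes such
// trees candidates for the class probe / cast profiling described here.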
//------------------------------------------------------------------------ // impIsCastHelperMayHaveProfileData: Checks whether a tree is a cast helper that might // have profile data // // Arguments: // tree - the tree object to check // // Returns: // true if the tree is a cast helper with potential profile data // bool Compiler::impIsCastHelperMayHaveProfileData(GenTree* tree) { if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT) || (JitConfig.JitCastProfiling() != 1)) { return false; } if (tree->IsCall() && tree->AsCall()->gtCallType == CT_HELPER) { const CorInfoHelpFunc helper = eeGetHelperNum(tree->AsCall()->gtCallMethHnd); if ((helper == CORINFO_HELP_ISINSTANCEOFINTERFACE) || (helper == CORINFO_HELP_ISINSTANCEOFCLASS) || (helper == CORINFO_HELP_CHKCASTCLASS) || (helper == CORINFO_HELP_CHKCASTINTERFACE)) { return true; } } return false; } GenTreeCall* Compiler::impReadyToRunHelperToTree( CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoHelpFunc helper, var_types type, GenTreeCall::Use* args /* = nullptr */, CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */) { CORINFO_CONST_LOOKUP lookup; if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup)) { return nullptr; } GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args); op1->setEntryPoint(lookup); return op1; } #endif GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* op1 = nullptr; switch (pCallInfo->kind) { case CORINFO_CALL: op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod); #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1->AsFptrVal()->gtEntryPoint = pCallInfo->codePointerLookup.constLookup; } #endif break; case CORINFO_CALL_CODE_POINTER: op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod); break; default: noway_assert(!"unknown call kind"); break; } return op1; } //------------------------------------------------------------------------ // getRuntimeContextTree: find pointer to context for runtime lookup. // // Arguments: // kind - lookup kind. // // Return Value: // Return GenTree pointer to generic shared context. // // Notes: // Reports about generic context using. GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind) { GenTree* ctxTree = nullptr; // Collectible types requires that for shared generic code, if we use the generic context parameter // that we report it. (This is a conservative approach, we could detect some cases particularly when the // context parameter is this that we don't need the eager reporting logic.) lvaGenericsContextInUse = true; Compiler* pRoot = impInlineRoot(); if (kind == CORINFO_LOOKUP_THISOBJ) { // this Object ctxTree = gtNewLclvNode(pRoot->info.compThisArg, TYP_REF); ctxTree->gtFlags |= GTF_VAR_CONTEXT; // context is the method table pointer of the this object ctxTree = gtNewMethodTableLookup(ctxTree); } else { assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM); // Exact method descriptor as passed in ctxTree = gtNewLclvNode(pRoot->info.compTypeCtxtArg, TYP_I_IMPL); ctxTree->gtFlags |= GTF_VAR_CONTEXT; } return ctxTree; } /*****************************************************************************/ /* Import a dictionary lookup to access a handle in code shared between generic instantiations. The lookup depends on the typeContext which is only available at runtime, and not at compile-time. 
pLookup->token1 and pLookup->token2 specify the handle that is needed. The cases are: 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the instantiation-specific handle, and the tokens to lookup the handle. 2. pLookup->indirections != CORINFO_USEHELPER : 2a. pLookup->testForNull == false : Dereference the instantiation-specific handle to get the handle. 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle. If it is non-NULL, it is the handle required. Else, call a helper to lookup the handle. */ GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_LOOKUP* pLookup, void* compileTimeHandle) { GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind); CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup; // It's available only via the run-time helper function if (pRuntimeLookup->indirections == CORINFO_USEHELPER) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pLookup->lookupKind); } #endif return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup, ctxTree, compileTimeHandle); } // Slot pointer GenTree* slotPtrTree = ctxTree; if (pRuntimeLookup->testForNull) { slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup slot")); } GenTree* indOffTree = nullptr; GenTree* lastIndOfTree = nullptr; // Applied repeated indirections for (WORD i = 0; i < pRuntimeLookup->indirections; i++) { if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } // The last indirection could be subject to a size check (dynamic dictionary expansion) bool isLastIndirectionWithSizeCheck = ((i == pRuntimeLookup->indirections - 1) && (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK)); if (i != 0) { slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!isLastIndirectionWithSizeCheck) { slotPtrTree->gtFlags |= GTF_IND_INVARIANT; } } if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset)) { slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree); } if (pRuntimeLookup->offsets[i] != 0) { if (isLastIndirectionWithSizeCheck) { lastIndOfTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("impRuntimeLookup indirectOffset")); } slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL)); } } // No null test required if (!pRuntimeLookup->testForNull) { if (pRuntimeLookup->indirections == 0) { return slotPtrTree; } slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); slotPtrTree->gtFlags |= GTF_IND_NONFAULTING; if (!pRuntimeLookup->testForFixup) { return slotPtrTree; } impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0")); unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test")); impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtDI); GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); // downcast the pointer to a TYP_INT on 64-bit targets slot = impImplicitIorI4Cast(slot, TYP_INT); // 
Use a GT_AND to check for the lowest bit and indirect if it is set GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1)); GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0)); // slot = GT_IND(slot - 1) slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL)); GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add); indir->gtFlags |= GTF_IND_NONFAULTING; indir->gtFlags |= GTF_IND_INVARIANT; slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL); GenTree* asg = gtNewAssignNode(slot, indir); GenTreeColon* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg); GenTreeQmark* qmark = gtNewQmarkNode(TYP_VOID, relop, colon); impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); return gtNewLclvNode(slotLclNum, TYP_I_IMPL); } assert(pRuntimeLookup->indirections != 0); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1")); // Extract the handle GenTree* handleForNullCheck = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree); handleForNullCheck->gtFlags |= GTF_IND_NONFAULTING; // Call the helper // - Setup argNode with the pointer to the signature returned by the lookup GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_GLOBAL_PTR, compileTimeHandle); GenTreeCall::Use* helperArgs = gtNewCallArgs(ctxTree, argNode); GenTreeCall* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs); // Check for null and possibly call helper GenTree* nullCheck = gtNewOperNode(GT_NE, TYP_INT, handleForNullCheck, gtNewIconNode(0, TYP_I_IMPL)); GenTree* handleForResult = gtCloneExpr(handleForNullCheck); GenTree* result = nullptr; if (pRuntimeLookup->sizeOffset != CORINFO_NO_SIZE_CHECK) { // Dynamic dictionary expansion support assert((lastIndOfTree != nullptr) && (pRuntimeLookup->indirections > 0)); // sizeValue = dictionary[pRuntimeLookup->sizeOffset] GenTreeIntCon* sizeOffset = gtNewIconNode(pRuntimeLookup->sizeOffset, TYP_I_IMPL); GenTree* sizeValueOffset = gtNewOperNode(GT_ADD, TYP_I_IMPL, lastIndOfTree, sizeOffset); GenTree* sizeValue = gtNewOperNode(GT_IND, TYP_I_IMPL, sizeValueOffset); sizeValue->gtFlags |= GTF_IND_NONFAULTING; // sizeCheck fails if sizeValue < pRuntimeLookup->offsets[i] GenTree* offsetValue = gtNewIconNode(pRuntimeLookup->offsets[pRuntimeLookup->indirections - 1], TYP_I_IMPL); GenTree* sizeCheck = gtNewOperNode(GT_LE, TYP_INT, sizeValue, offsetValue); // revert null check condition. nullCheck->ChangeOperUnchecked(GT_EQ); // ((sizeCheck fails || nullCheck fails))) ? (helperCall : handle). // Add checks and the handle as call arguments, indirect call transformer will handle this. helperCall->gtCallArgs = gtPrependNewCallArg(handleForResult, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(sizeCheck, helperCall->gtCallArgs); helperCall->gtCallArgs = gtPrependNewCallArg(nullCheck, helperCall->gtCallArgs); result = helperCall; addExpRuntimeLookupCandidate(helperCall); } else { GenTreeColon* colonNullCheck = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, handleForResult, helperCall); result = gtNewQmarkNode(TYP_I_IMPL, nullCheck, colonNullCheck); } unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling Runtime Lookup tree")); impAssignTempGen(tmp, result, (unsigned)CHECK_SPILL_NONE); return gtNewLclvNode(tmp, TYP_I_IMPL); } /****************************************************************************** * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp. 
* If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum, * else, grab a new temp. * For structs (which can be pushed on the stack using obj, etc), * special handling is needed */ struct RecursiveGuard { public: RecursiveGuard() { m_pAddress = nullptr; } ~RecursiveGuard() { if (m_pAddress) { *m_pAddress = false; } } void Init(bool* pAddress, bool bInitialize) { assert(pAddress && *pAddress == false && "Recursive guard violation"); m_pAddress = pAddress; if (bInitialize) { *m_pAddress = true; } } protected: bool* m_pAddress; }; bool Compiler::impSpillStackEntry(unsigned level, unsigned tnum #ifdef DEBUG , bool bAssertOnRecursion, const char* reason #endif ) { #ifdef DEBUG RecursiveGuard guard; guard.Init(&impNestedStackSpill, bAssertOnRecursion); #endif GenTree* tree = verCurrentState.esStack[level].val; /* Allocate a temp if we haven't been asked to use a particular one */ if (tnum != BAD_VAR_NUM && (tnum >= lvaCount)) { return false; } bool isNewTemp = false; if (tnum == BAD_VAR_NUM) { tnum = lvaGrabTemp(true DEBUGARG(reason)); isNewTemp = true; } /* Assign the spilled entry to the temp */ impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level); // If temp is newly introduced and a ref type, grab what type info we can. if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF)) { assert(lvaTable[tnum].lvSingleDef == 0); lvaTable[tnum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tnum); CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle(); lvaSetClass(tnum, tree, stkHnd); // If we're assigning a GT_RET_EXPR, note the temp over on the call, // so the inliner can use it in case it needs a return spill temp. if (tree->OperGet() == GT_RET_EXPR) { JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum); GenTree* call = tree->AsRetExpr()->gtInlineCandidate; InlineCandidateInfo* ici = call->AsCall()->gtInlineCandidateInfo; ici->preexistingSpillTemp = tnum; } } // The tree type may be modified by impAssignTempGen, so use the type of the lclVar. var_types type = genActualType(lvaTable[tnum].TypeGet()); GenTree* temp = gtNewLclvNode(tnum, type); verCurrentState.esStack[level].val = temp; return true; } /***************************************************************************** * * Ensure that the stack has only spilled values */ void Compiler::impSpillStackEnsure(bool spillLeaves) { assert(!spillLeaves || opts.compDbgCode); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (!spillLeaves && tree->OperIsLeaf()) { continue; } // Temps introduced by the importer itself don't need to be spilled bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->AsLclVarCommon()->GetLclNum() >= info.compLocalsCount); if (isTempLcl) { continue; } impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure")); } } void Compiler::impSpillEvalStack() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack")); } } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and append the assignments to the statement list. * On return the stack is guaranteed to be empty. 
*/ inline void Compiler::impEvalSideEffects() { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects")); verCurrentState.esStackDepth = 0; } /***************************************************************************** * * If the stack contains any trees with side effects in them, assign those * trees to temps and replace them on the stack with refs to their temps. * [0..chkLevel) is the portion of the stack which will be checked and spilled. */ inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason)) { assert(chkLevel != (unsigned)CHECK_SPILL_NONE); /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */ impSpillSpecialSideEff(); if (chkLevel == (unsigned)CHECK_SPILL_ALL) { chkLevel = verCurrentState.esStackDepth; } assert(chkLevel <= verCurrentState.esStackDepth); GenTreeFlags spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT; for (unsigned i = 0; i < chkLevel; i++) { GenTree* tree = verCurrentState.esStack[i].val; if ((tree->gtFlags & spillFlags) != 0 || (spillGlobEffects && // Only consider the following when spillGlobEffects == true !impIsAddressInLocal(tree) && // No need to spill the GT_ADDR node on a local. gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or // lvAddrTaken flag. { impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason)); } } } /***************************************************************************** * * If the stack contains any trees with special side effects in them, assign * those trees to temps and replace them on the stack with refs to their temps. */ inline void Compiler::impSpillSpecialSideEff() { // Only exception objects need to be carefully handled if (!compCurBB->bbCatchTyp) { return; } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; // Make sure if we have an exception object in the sub tree we spill ourselves. if (gtHasCatchArg(tree)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff")); } } } /***************************************************************************** * * Spill all stack references to value classes (TYP_STRUCT nodes) */ void Compiler::impSpillValueClasses() { for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT) { // Tree walk was aborted, which means that we found a // value class on the stack. Need to spill that // stack entry. impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses")); } } } /***************************************************************************** * * Callback that checks if a tree node is TYP_STRUCT */ Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data) { fgWalkResult walkResult = WALK_CONTINUE; if ((*pTree)->gtType == TYP_STRUCT) { // Abort the walk and indicate that we found a value class walkResult = WALK_ABORT; } return walkResult; } /***************************************************************************** * * If the stack contains any trees with references to local #lclNum, assign * those trees to temps and replace their place on the stack with refs to * their temps. 
*/ void Compiler::impSpillLclRefs(ssize_t lclNum) { /* Before we make any appends to the tree list we must spill the * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */ impSpillSpecialSideEff(); for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTree* tree = verCurrentState.esStack[level].val; /* If the tree may throw an exception, and the block has a handler, then we need to spill assignments to the local if the local is live on entry to the handler. Just spill 'em all without considering the liveness */ bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT)); /* Skip the tree if it doesn't have an affected reference, unless xcptnCaught */ if (xcptnCaught || gtHasRef(tree, lclNum)) { impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs")); } } } /***************************************************************************** * * Push catch arg onto the stack. * If there are jumps to the beginning of the handler, insert basic block * and spill catch arg to a temp. Update the handler block if necessary. * * Returns the basic block of the actual handler. */ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter) { // Do not inject the basic block twice on reimport. This should be // hit only under JIT stress. See if the block is the one we injected. // Note that EH canonicalization can inject internal blocks here. We might // be able to re-use such a block (but we don't, right now). if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) == (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE)) { Statement* stmt = hndBlk->firstStmt(); if (stmt != nullptr) { GenTree* tree = stmt->GetRootNode(); assert(tree != nullptr); if ((tree->gtOper == GT_ASG) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR) && (tree->AsOp()->gtOp2->gtOper == GT_CATCH_ARG)) { tree = gtNewLclvNode(tree->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(), TYP_REF); impPushOnStack(tree, typeInfo(TI_REF, clsHnd)); return hndBlk->bbNext; } } // If we get here, it must have been some other kind of internal block. It's possible that // someone prepended something to our injected block, but that's unlikely. } /* Push the exception address value on the stack */ GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF); /* Mark the node as having a side-effect - i.e. cannot be * moved around since it is tied to a fixed location (EAX) */ arg->gtFlags |= GTF_ORDER_SIDEEFF; #if defined(JIT32_GCENCODER) const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5); #else const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5); #endif // defined(JIT32_GCENCODER) /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */ if (hndBlk->bbRefs > 1 || forceInsertNewBlock) { if (hndBlk->bbRefs == 1) { hndBlk->bbRefs++; } /* Create extra basic block for the spill */ BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true); newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE; newBlk->inheritWeight(hndBlk); newBlk->bbCodeOffs = hndBlk->bbCodeOffs; /* Account for the new link we are about to create */ hndBlk->bbRefs++; // Spill into a temp. 
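        // The catch argument is always an object reference, so the temp is typed TYP_REF
        // and recorded in bbStkTempsIn so the handler block knows where its incoming value lives.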
unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg")); lvaTable[tempNum].lvType = TYP_REF; GenTree* argAsg = gtNewTempAssign(tempNum, arg); arg = gtNewLclvNode(tempNum, TYP_REF); hndBlk->bbStkTempsIn = tempNum; Statement* argStmt; if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { // Report the debug info. impImportBlockCode won't treat the actual handler as exception block and thus // won't do it for us. // TODO-DEBUGINFO: Previous code always set stack as non-empty // here. Can we not just use impCurStmtOffsSet? Are we out of sync // here with the stack? impCurStmtDI = DebugInfo(compInlineContext, ILLocation(newBlk->bbCodeOffs, false, false)); argStmt = gtNewStmt(argAsg, impCurStmtDI); } else { argStmt = gtNewStmt(argAsg); } fgInsertStmtAtEnd(newBlk, argStmt); } impPushOnStack(arg, typeInfo(TI_REF, clsHnd)); return hndBlk; } /***************************************************************************** * * Given a tree, clone it. *pClone is set to the cloned tree. * Returns the original tree if the cloning was easy, * else returns the temp to which the tree had to be spilled to. * If the tree has side-effects, it will be spilled to a temp. */ GenTree* Compiler::impCloneExpr(GenTree* tree, GenTree** pClone, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, Statement** pAfterStmt DEBUGARG(const char* reason)) { if (!(tree->gtFlags & GTF_GLOB_EFFECT)) { GenTree* clone = gtClone(tree, true); if (clone) { *pClone = clone; return tree; } } /* Store the operand in a temp and return the temp */ unsigned temp = lvaGrabTemp(true DEBUGARG(reason)); // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which // return a struct type. It also may modify the struct type to a more // specialized type (e.g. a SIMD type). So we will get the type from // the lclVar AFTER calling impAssignTempGen(). impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtDI); var_types type = genActualType(lvaTable[temp].TypeGet()); *pClone = gtNewLclvNode(temp, type); return gtNewLclvNode(temp, type); } //------------------------------------------------------------------------ // impCreateDIWithCurrentStackInfo: Create a DebugInfo instance with the // specified IL offset and 'is call' bit, using the current stack to determine // whether to set the 'stack empty' bit. // // Arguments: // offs - the IL offset for the DebugInfo // isCall - whether the created DebugInfo should have the IsCall bit set // // Return Value: // The DebugInfo instance. // DebugInfo Compiler::impCreateDIWithCurrentStackInfo(IL_OFFSET offs, bool isCall) { assert(offs != BAD_IL_OFFSET); bool isStackEmpty = verCurrentState.esStackDepth <= 0; return DebugInfo(compInlineContext, ILLocation(offs, isStackEmpty, isCall)); } //------------------------------------------------------------------------ // impCurStmtOffsSet: Set the "current debug info" to attach to statements that // we are generating next. // // Arguments: // offs - the IL offset // // Remarks: // This function will be called in the main IL processing loop when it is // determined that we have reached a location in the IL stream for which we // want to report debug information. This is the main way we determine which // statements to report debug info for to the EE: for other statements, they // will have no debug information attached. 
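//    Passing BAD_IL_OFFSET clears the current debug info, so statements appended
//    afterwards are not attributed to a stale IL offset.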
// inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs) { if (offs == BAD_IL_OFFSET) { impCurStmtDI = DebugInfo(compInlineContext, ILLocation()); } else { impCurStmtDI = impCreateDIWithCurrentStackInfo(offs, false); } } //------------------------------------------------------------------------ // impCanSpillNow: check is it possible to spill all values from eeStack to local variables. // // Arguments: // prevOpcode - last importer opcode // // Return Value: // true if it is legal, false if it could be a sequence that we do not want to divide. bool Compiler::impCanSpillNow(OPCODE prevOpcode) { // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence. // Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed. return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ); } /***************************************************************************** * * Remember the instr offset for the statements * * When we do impAppendTree(tree), we can't set stmt->SetLastILOffset(impCurOpcOffs), * if the append was done because of a partial stack spill, * as some of the trees corresponding to code up to impCurOpcOffs might * still be sitting on the stack. * So we delay calling of SetLastILOffset() until impNoteLastILoffs(). * This should be called when an opcode finally/explicitly causes * impAppendTree(tree) to be called (as opposed to being called because of * a spill caused by the opcode) */ #ifdef DEBUG void Compiler::impNoteLastILoffs() { if (impLastILoffsStmt == nullptr) { // We should have added a statement for the current basic block // Is this assert correct ? assert(impLastStmt); impLastStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); } else { impLastILoffsStmt->SetLastILOffset(compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs); impLastILoffsStmt = nullptr; } } #endif // DEBUG /***************************************************************************** * We don't create any GenTree (excluding spills) for a branch. * For debugging info, we need a placeholder so that we can note * the IL offset in gtStmt.gtStmtOffs. So append an empty statement. */ void Compiler::impNoteBranchOffs() { if (opts.compDbgCode) { impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } } /***************************************************************************** * Locate the next stmt boundary for which we need to record info. * We will have to spill the stack at such boundaries if it is not * already empty. * Returns the next stmt boundary (after the start of the block) */ unsigned Compiler::impInitBlockLineInfo() { /* Assume the block does not correspond with any IL offset. This prevents us from reporting extra offsets. Extra mappings can cause confusing stepping, especially if the extra mapping is a jump-target, and the debugger does not ignore extra mappings, but instead rewinds to the nearest known offset */ impCurStmtOffsSet(BAD_IL_OFFSET); IL_OFFSET blockOffs = compCurBB->bbCodeOffs; if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES)) { impCurStmtOffsSet(blockOffs); } /* Always report IL offset 0 or some tests get confused. 
Probably a good idea anyways */ if (blockOffs == 0) { impCurStmtOffsSet(blockOffs); } if (!info.compStmtOffsetsCount) { return ~0; } /* Find the lowest explicit stmt boundary within the block */ /* Start looking at an entry that is based on our instr offset */ unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize; if (index >= info.compStmtOffsetsCount) { index = info.compStmtOffsetsCount - 1; } /* If we've guessed too far, back up */ while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs) { index--; } /* If we guessed short, advance ahead */ while (info.compStmtOffsets[index] < blockOffs) { index++; if (index == info.compStmtOffsetsCount) { return info.compStmtOffsetsCount; } } assert(index < info.compStmtOffsetsCount); if (info.compStmtOffsets[index] == blockOffs) { /* There is an explicit boundary for the start of this basic block. So we will start with bbCodeOffs. Else we will wait until we get to the next explicit boundary */ impCurStmtOffsSet(blockOffs); index++; } return index; } /*****************************************************************************/ bool Compiler::impOpcodeIsCallOpcode(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: return true; default: return false; } } /*****************************************************************************/ static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode) { switch (opcode) { case CEE_CALL: case CEE_CALLI: case CEE_CALLVIRT: case CEE_JMP: case CEE_NEWOBJ: case CEE_NEWARR: return true; default: return false; } } /*****************************************************************************/ // One might think it is worth caching these values, but results indicate // that it isn't. // In addition, caching them causes SuperPMI to be unable to completely // encapsulate an individual method context. CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass() { CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF); assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr); return refAnyClass; } CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass() { CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE); assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr); return typeHandleClass; } CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle() { CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE); assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr); return argIteratorClass; } CORINFO_CLASS_HANDLE Compiler::impGetStringClass() { CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING); assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr); return stringClass; } CORINFO_CLASS_HANDLE Compiler::impGetObjectClass() { CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT); assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr); return objectClass; } /***************************************************************************** * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we * set its type to TYP_BYREF when we create it. 
We know if it can be * changed to TYP_I_IMPL only at the point where we use it */ /* static */ void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2) { if (tree1->IsLocalAddrExpr() != nullptr) { tree1->gtType = TYP_I_IMPL; } if (tree2 && (tree2->IsLocalAddrExpr() != nullptr)) { tree2->gtType = TYP_I_IMPL; } } /***************************************************************************** * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want * to make that an explicit cast in our trees, so any implicit casts that * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are * turned into explicit casts here. * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0) */ GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp) { var_types currType = genActualType(tree->gtType); var_types wantedType = genActualType(dstTyp); if (wantedType != currType) { // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp)) { if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->AsIntCon()->gtIconVal == 0))) { tree->gtType = TYP_I_IMPL; } } #ifdef TARGET_64BIT else if (varTypeIsI(wantedType) && (currType == TYP_INT)) { // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } else if ((wantedType == TYP_INT) && varTypeIsI(currType)) { // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT); } #endif // TARGET_64BIT } return tree; } /***************************************************************************** * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases, * but we want to make that an explicit cast in our trees, so any implicit casts * that exist in the IL are turned into explicit casts here. */ GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp) { if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType)) { tree = gtNewCastNode(dstTyp, tree, false, dstTyp); } return tree; } //------------------------------------------------------------------------ // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray // with a GT_COPYBLK node. // // Arguments: // sig - The InitializeArray signature. // // Return Value: // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or // nullptr otherwise. // // Notes: // The function recognizes the following IL pattern: // ldc <length> or a list of ldc <lower bound>/<length> // newarr or newobj // dup // ldtoken <field handle> // call InitializeArray // The lower bounds need not be constant except when the array rank is 1. // The function recognizes all kinds of arrays thus enabling a small runtime // such as CoreRT to skip providing an implementation for InitializeArray. GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 2); GenTree* fieldTokenNode = impStackTop(0).val; GenTree* arrayLocalNode = impStackTop(1).val; // // Verify that the field token is known and valid. Note that It's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
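    // The expected shape is a CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD helper call whose
    // argument is the constant field handle, possibly wrapped in a GT_IND for indirect tokens.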
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } // // We need to get the number of elements in the array and the size of each element. // We verify that the newarr statement is exactly what we expect it to be. // If it's not then we just return NULL and we don't optimize this call // // It is possible the we don't have any statements in the block yet. if (impLastStmt == nullptr) { return nullptr; } // // We start by looking at the last statement, making sure it's an assignment, and // that the target of the assignment is the array passed to InitializeArray. // GenTree* arrayAssignment = impLastStmt->GetRootNode(); if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->AsOp()->gtOp1->gtOper != GT_LCL_VAR) || (arrayLocalNode->gtOper != GT_LCL_VAR) || (arrayAssignment->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum() != arrayLocalNode->AsLclVarCommon()->GetLclNum())) { return nullptr; } // // Make sure that the object being assigned is a helper call. // GenTree* newArrayCall = arrayAssignment->AsOp()->gtOp2; if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->AsCall()->gtCallType != CT_HELPER)) { return nullptr; } // // Verify that it is one of the new array helpers. // bool isMDArray = false; if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8) #ifdef FEATURE_READYTORUN && newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1) #endif ) { if (newArrayCall->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR)) { return nullptr; } isMDArray = true; } CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->AsCall()->compileTimeHelperArgumentHandle; // // Make sure we found a compile time handle to the array // if (!arrayClsHnd) { return nullptr; } unsigned rank = 0; S_UINT32 numElements; if (isMDArray) { rank = info.compCompHnd->getArrayRank(arrayClsHnd); if (rank == 0) { return nullptr; } GenTreeCall::Use* tokenArg = newArrayCall->AsCall()->gtCallArgs; assert(tokenArg != nullptr); GenTreeCall::Use* numArgsArg = tokenArg->GetNext(); assert(numArgsArg != nullptr); GenTreeCall::Use* argsArg = numArgsArg->GetNext(); assert(argsArg != nullptr); // // The number of arguments should be a constant between 1 and 64. The rank can't be 0 // so at least one length must be present and the rank can't exceed 32 so there can // be at most 64 arguments - 32 lengths and 32 lower bounds. 
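        // For example, a rank 2 array created with explicit bounds passes 4 arguments,
        // laid out as (lower bound 0, length 0, lower bound 1, length 1); see the loop below.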
// if ((!numArgsArg->GetNode()->IsCnsIntOrI()) || (numArgsArg->GetNode()->AsIntCon()->IconValue() < 1) || (numArgsArg->GetNode()->AsIntCon()->IconValue() > 64)) { return nullptr; } unsigned numArgs = static_cast<unsigned>(numArgsArg->GetNode()->AsIntCon()->IconValue()); bool lowerBoundsSpecified; if (numArgs == rank * 2) { lowerBoundsSpecified = true; } else if (numArgs == rank) { lowerBoundsSpecified = false; // // If the rank is 1 and a lower bound isn't specified then the runtime creates // a SDArray. Note that even if a lower bound is specified it can be 0 and then // we get a SDArray as well, see the for loop below. // if (rank == 1) { isMDArray = false; } } else { return nullptr; } // // The rank is known to be at least 1 so we can start with numElements being 1 // to avoid the need to special case the first dimension. // numElements = S_UINT32(1); struct Match { static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) && (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) && IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs); } static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs) { return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) && (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs); } static bool IsComma(GenTree* tree) { return (tree != nullptr) && (tree->OperGet() == GT_COMMA); } }; unsigned argIndex = 0; GenTree* comma; for (comma = argsArg->GetNode(); Match::IsComma(comma); comma = comma->gtGetOp2()) { if (lowerBoundsSpecified) { // // In general lower bounds can be ignored because they're not needed to // calculate the total number of elements. But for single dimensional arrays // we need to know if the lower bound is 0 because in this case the runtime // creates a SDArray and this affects the way the array data offset is calculated. // if (rank == 1) { GenTree* lowerBoundAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2(); if (lowerBoundNode->IsIntegralConst(0)) { isMDArray = false; } } comma = comma->gtGetOp2(); argIndex++; } GenTree* lengthNodeAssign = comma->gtGetOp1(); assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs)); GenTree* lengthNode = lengthNodeAssign->gtGetOp2(); if (!lengthNode->IsCnsIntOrI()) { return nullptr; } numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue()); argIndex++; } assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs)); if (argIndex != numArgs) { return nullptr; } } else { // // Make sure there are exactly two arguments: the array class and // the number of elements. 
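        // (The ready-to-run helper passes the element count first; both layouts are handled below.)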
// GenTree* arrayLengthNode; GenTreeCall::Use* args = newArrayCall->AsCall()->gtCallArgs; #ifdef FEATURE_READYTORUN if (newArrayCall->AsCall()->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)) { // Array length is 1st argument for readytorun helper arrayLengthNode = args->GetNode(); } else #endif { // Array length is 2nd argument for regular helper arrayLengthNode = args->GetNext()->GetNode(); } // // This optimization is only valid for a constant array size. // if (arrayLengthNode->gtOper != GT_CNS_INT) { return nullptr; } numElements = S_SIZE_T(arrayLengthNode->AsIntCon()->gtIconVal); if (!info.compCompHnd->isSDArray(arrayClsHnd)) { return nullptr; } } CORINFO_CLASS_HANDLE elemClsHnd; var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd)); // // Note that genTypeSize will return zero for non primitive types, which is exactly // what we want (size will then be 0, and we will catch this in the conditional below). // Note that we don't expect this to fail for valid binaries, so we assert in the // non-verification case (the verification case should not assert but rather correctly // handle bad binaries). This assert is not guarding any specific invariant, but rather // saying that we don't expect this to happen, and if it is hit, we need to investigate // why. // S_UINT32 elemSize(genTypeSize(elementType)); S_UINT32 size = elemSize * S_UINT32(numElements); if (size.IsOverflow()) { return nullptr; } if ((size.Value() == 0) || (varTypeIsGC(elementType))) { return nullptr; } void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value()); if (!initData) { return nullptr; } // // At this point we are ready to commit to implementing the InitializeArray // intrinsic using a struct assignment. Pop the arguments from the stack and // return the struct assignment node. // impPopStack(); impPopStack(); const unsigned blkSize = size.Value(); unsigned dataOffset; if (isMDArray) { dataOffset = eeGetMDArrayDataOffset(rank); } else { dataOffset = eeGetArrayDataOffset(); } GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL)); GenTree* dst = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize)); GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true); #ifdef DEBUG src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_IntializeArrayIntrinsics; #endif return gtNewBlkOpNode(dst, // dst src, // src false, // volatile true); // copyBlock } GenTree* Compiler::impCreateSpanIntrinsic(CORINFO_SIG_INFO* sig) { assert(sig->numArgs == 1); assert(sig->sigInst.methInstCount == 1); GenTree* fieldTokenNode = impStackTop(0).val; // // Verify that the field token is known and valid. Note that it's also // possible for the token to come from reflection, in which case we cannot do // the optimization and must therefore revert to calling the helper. You can // see an example of this in bvt\DynIL\initarray2.exe (in Main). // // Check to see if the ldtoken helper call is what we see here. 
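    // As in impInitializeArrayIntrinsic above, the token must arrive as a
    // CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD helper call wrapping a constant field handle.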
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->AsCall()->gtCallType != CT_HELPER) || (fieldTokenNode->AsCall()->gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD))) { return nullptr; } // Strip helper call away fieldTokenNode = fieldTokenNode->AsCall()->gtCallArgs->GetNode(); if (fieldTokenNode->gtOper == GT_IND) { fieldTokenNode = fieldTokenNode->AsOp()->gtOp1; } // Check for constant if (fieldTokenNode->gtOper != GT_CNS_INT) { return nullptr; } CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->AsIntCon()->gtCompileTimeHandle; if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr)) { return nullptr; } CORINFO_CLASS_HANDLE fieldOwnerHnd = info.compCompHnd->getFieldClass(fieldToken); CORINFO_CLASS_HANDLE fieldClsHnd; var_types fieldElementType = JITtype2varType(info.compCompHnd->getFieldType(fieldToken, &fieldClsHnd, fieldOwnerHnd)); unsigned totalFieldSize; // Most static initialization data fields are of some structure, but it is possible for them to be of various // primitive types as well if (fieldElementType == var_types::TYP_STRUCT) { totalFieldSize = info.compCompHnd->getClassSize(fieldClsHnd); } else { totalFieldSize = genTypeSize(fieldElementType); } // Limit to primitive or enum type - see ArrayNative::GetSpanDataFrom() CORINFO_CLASS_HANDLE targetElemHnd = sig->sigInst.methInst[0]; if (info.compCompHnd->getTypeForPrimitiveValueClass(targetElemHnd) == CORINFO_TYPE_UNDEF) { return nullptr; } const unsigned targetElemSize = info.compCompHnd->getClassSize(targetElemHnd); assert(targetElemSize != 0); const unsigned count = totalFieldSize / targetElemSize; if (count == 0) { return nullptr; } void* data = info.compCompHnd->getArrayInitializationData(fieldToken, totalFieldSize); if (!data) { return nullptr; } // // Ready to commit to the work // impPopStack(); // Turn count and pointer value into constants. GenTree* lengthValue = gtNewIconNode(count, TYP_INT); GenTree* pointerValue = gtNewIconHandleNode((size_t)data, GTF_ICON_CONST_PTR); // Construct ReadOnlySpan<T> to return. CORINFO_CLASS_HANDLE spanHnd = sig->retTypeClass; unsigned spanTempNum = lvaGrabTemp(true DEBUGARG("ReadOnlySpan<T> for CreateSpan<T>")); lvaSetStruct(spanTempNum, spanHnd, false); CORINFO_FIELD_HANDLE pointerFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 0); CORINFO_FIELD_HANDLE lengthFieldHnd = info.compCompHnd->getFieldInClass(spanHnd, 1); GenTreeLclFld* pointerField = gtNewLclFldNode(spanTempNum, TYP_BYREF, 0); pointerField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(pointerFieldHnd)); GenTree* pointerFieldAsg = gtNewAssignNode(pointerField, pointerValue); GenTreeLclFld* lengthField = gtNewLclFldNode(spanTempNum, TYP_INT, TARGET_POINTER_SIZE); lengthField->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(lengthFieldHnd)); GenTree* lengthFieldAsg = gtNewAssignNode(lengthField, lengthValue); // Now append a few statements the initialize the span impAppendTree(lengthFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); impAppendTree(pointerFieldAsg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // And finally create a tree that points at the span. 
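    // The caller sees the fully initialized ReadOnlySpan<T> as a use of the struct temp created above.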
    return impCreateLocalNode(spanTempNum DEBUGARG(0));
}

//------------------------------------------------------------------------
// impIntrinsic: possibly expand intrinsic call into alternate IR sequence
//
// Arguments:
//    newobjThis - for constructor calls, the tree for the newly allocated object
//    clsHnd - handle for the intrinsic method's class
//    method - handle for the intrinsic method
//    sig - signature of the intrinsic method
//    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
//    memberRef - the token for the intrinsic method
//    readonlyCall - true if call has a readonly prefix
//    tailCall - true if call is in tail position
//    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
//       if call is not constrained
//    constraintCallThisTransform -- this transform to apply for a constrained call
//    pIntrinsicName [OUT] -- intrinsic name (see enumeration in namedintrinsiclist.h)
//       for "traditional" jit intrinsics
//    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
//       that is amenable to special downstream optimization opportunities
//
// Returns:
//    IR tree to use in place of the call, or nullptr if the jit should treat
//    the intrinsic call like a normal call.
//
//    pIntrinsicName set to non-illegal value if the call is recognized as a
//    traditional jit intrinsic, even if the intrinsic is not expanded.
//
//    isSpecial set true if the expansion is subject to special
//    optimizations later in the jit processing
//
// Notes:
//    On success the IR tree may be a call to a different method or an inline
//    sequence. If it is a call, then the intrinsic processing here is responsible
//    for handling all the special cases, as upon return to impImportCall
//    expanded intrinsics bypass most of the normal call processing.
//
//    Intrinsics are generally not recognized in minopts and debug codegen.
//
//    However, certain traditional intrinsics are identified as "must expand"
//    if there is no fallback implementation to invoke; these must be handled
//    in all codegen modes.
//
//    New style intrinsics (where the fallback implementation is in IL) are
//    identified as "must expand" if they are invoked from within their
//    own method bodies.
//
GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
                                CORINFO_CLASS_HANDLE    clsHnd,
                                CORINFO_METHOD_HANDLE   method,
                                CORINFO_SIG_INFO*       sig,
                                unsigned                methodFlags,
                                int                     memberRef,
                                bool                    readonlyCall,
                                bool                    tailCall,
                                CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                                CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
                                NamedIntrinsic*         pIntrinsicName,
                                bool*                   isSpecialIntrinsic)
{
    assert((methodFlags & CORINFO_FLG_INTRINSIC) != 0);

    bool           mustExpand = false;
    bool           isSpecial  = false;
    NamedIntrinsic ni         = NI_Illegal;

    if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
    {
        // The recursive non-virtual calls to Jit intrinsics are must-expand by convention.
        mustExpand = mustExpand || (gtIsRecursiveCall(method) && !(methodFlags & CORINFO_FLG_VIRTUAL));

        ni = lookupNamedIntrinsic(method);

        // We specially support the following on all platforms to allow for dead
        // code optimization and to more generally support recursive intrinsics.
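        // The IsSupported properties fold to constant true/false here so that guarded
        // fallback paths in the libraries can be removed as dead code.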
if (ni == NI_IsSupported_True) { assert(sig->numArgs == 0); return gtNewIconNode(true); } if (ni == NI_IsSupported_False) { assert(sig->numArgs == 0); return gtNewIconNode(false); } if (ni == NI_Throw_PlatformNotSupportedException) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } #ifdef FEATURE_HW_INTRINSICS if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END)) { GenTree* hwintrinsic = impHWIntrinsic(ni, clsHnd, method, sig, mustExpand); if (mustExpand && (hwintrinsic == nullptr)) { return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand); } return hwintrinsic; } if ((ni > NI_SIMD_AS_HWINTRINSIC_START) && (ni < NI_SIMD_AS_HWINTRINSIC_END)) { // These intrinsics aren't defined recursively and so they will never be mustExpand // Instead, they provide software fallbacks that will be executed instead. assert(!mustExpand); return impSimdAsHWIntrinsic(ni, clsHnd, method, sig, newobjThis); } #endif // FEATURE_HW_INTRINSICS } *pIntrinsicName = ni; if (ni == NI_System_StubHelpers_GetStubContext) { // must be done regardless of DbgCode and MinOpts return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL); } if (ni == NI_System_StubHelpers_NextCallReturnAddress) { // For now we just avoid inlining anything into these methods since // this intrinsic is only rarely used. We could do this better if we // wanted to by trying to match which call is the one we need to get // the return address of. info.compHasNextCallRetAddr = true; return new (this, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL); } switch (ni) { // CreateSpan must be expanded for NativeAOT case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: mustExpand |= IsTargetAbi(CORINFO_CORERT_ABI); break; case NI_System_ByReference_ctor: case NI_System_ByReference_get_Value: case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: mustExpand = true; break; default: break; } GenTree* retNode = nullptr; // Under debug and minopts, only expand what is required. // NextCallReturnAddress intrinsic returns the return address of the next call. // If that call is an intrinsic and is expanded, codegen for NextCallReturnAddress will fail. // To avoid that we conservatively expand only required intrinsics in methods that call // the NextCallReturnAddress intrinsic. 
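    // Anything that is not must-expand is left as an ordinary call in those modes.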
if (!mustExpand && (opts.OptimizationDisabled() || info.compHasNextCallRetAddr)) { *pIntrinsicName = NI_Illegal; return retNode; } CorInfoType callJitType = sig->retType; var_types callType = JITtype2varType(callJitType); /* First do the intrinsics which are always smaller than a call */ if (ni != NI_Illegal) { assert(retNode == nullptr); switch (ni) { case NI_Array_Address: case NI_Array_Get: case NI_Array_Set: retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, ni); break; case NI_System_String_Equals: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_MemoryExtensions_Equals: case NI_System_MemoryExtensions_SequenceEqual: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ false, sig, methodFlags); break; } case NI_System_String_StartsWith: { retNode = impStringEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_StartsWith: { retNode = impSpanEqualsOrStartsWith(/*startsWith:*/ true, sig, methodFlags); break; } case NI_System_MemoryExtensions_AsSpan: case NI_System_String_op_Implicit: { assert(sig->numArgs == 1); isSpecial = impStackTop().val->OperIs(GT_CNS_STR); break; } case NI_System_String_get_Chars: { GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; retNode = gtNewIndexRef(TYP_USHORT, op1, op2); retNode->gtFlags |= GTF_INX_STRING_LAYOUT; break; } case NI_System_String_get_Length: { GenTree* op1 = impPopStack().val; if (op1->OperIs(GT_CNS_STR)) { // Optimize `ldstr + String::get_Length()` to CNS_INT // e.g. "Hello".Length => 5 GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon()); if (iconNode != nullptr) { retNode = iconNode; break; } } GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen, compCurBB); op1 = arrLen; // Getting the length of a null string should throw op1->gtFlags |= GTF_EXCEPT; retNode = op1; break; } // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field // in a value type. The canonical example of this is Span<T>. In effect this is just a // substitution. The parameter byref will be assigned into the newly allocated object. case NI_System_ByReference_ctor: { // Remove call to constructor and directly assign the byref passed // to the call to the first slot of the ByReference struct. GenTree* op1 = impPopStack().val; GenTree* thisptr = newobjThis; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0); GenTree* assign = gtNewAssignNode(field, op1); GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1()); assert(byReferenceStruct != nullptr); impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd)); retNode = assign; break; } // Implement ptr value getter for ByReference struct. case NI_System_ByReference_get_Value: { GenTree* op1 = impPopStack().val; CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0); retNode = field; break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan: { retNode = impCreateSpanIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray: { retNode = impInitializeArrayIntrinsic(sig); break; } case NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant: { GenTree* op1 = impPopStack().val; if (op1->OperIsConst()) { // op1 is a known constant, replace with 'true'. 
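                    // e.g. RuntimeHelpers.IsKnownConstant(42) becomes the constant 'true'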
retNode = gtNewIconNode(1); JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to true early\n"); // We can also consider FTN_ADDR and typeof(T) here } else { // op1 is not a known constant, we'll do the expansion in morph retNode = new (this, GT_INTRINSIC) GenTreeIntrinsic(TYP_INT, op1, ni, method); JITDUMP("\nConverting RuntimeHelpers.IsKnownConstant to:\n"); DISPTREE(retNode); } break; } case NI_System_Activator_AllocatorOf: case NI_System_Activator_DefaultConstructorOf: case NI_System_Object_MethodTableOf: case NI_System_EETypePtr_EETypePtrOf: { assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it. CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = memberRef; resolvedToken.tokenType = CORINFO_TOKENKIND_Method; CORINFO_GENERICHANDLE_RESULT embedInfo; info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo); GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef), embedInfo.compileTimeHandle); if (rawHandle == nullptr) { return nullptr; } noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL)); unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle")); impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE); GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL); GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar); var_types resultType = JITtype2varType(sig->retType); retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr); break; } case NI_System_Span_get_Item: case NI_System_ReadOnlySpan_get_Item: { // Have index, stack pointer-to Span<T> s on the stack. Expand to: // // For Span<T> // Comma // BoundsCheck(index, s->_length) // s->_pointer + index * sizeof(T) // // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref // // Signature should show one class type parameter, which // we need to examine. assert(sig->sigInst.classInstCount == 1); assert(sig->numArgs == 1); CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0]; const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd); assert(elemSize > 0); const bool isReadOnly = (ni == NI_System_ReadOnlySpan_get_Item); JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "", info.compCompHnd->getClassName(spanElemHnd), elemSize); GenTree* index = impPopStack().val; GenTree* ptrToSpan = impPopStack().val; GenTree* indexClone = nullptr; GenTree* ptrToSpanClone = nullptr; assert(genActualType(index) == TYP_INT); assert(ptrToSpan->TypeGet() == TYP_BYREF); #if defined(DEBUG) if (verbose) { printf("with ptr-to-span\n"); gtDispTree(ptrToSpan); printf("and index\n"); gtDispTree(index); } #endif // defined(DEBUG) // We need to use both index and ptr-to-span twice, so clone or spill. 
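                // impCloneExpr spills to a temp when the tree has global side effects, so the
                // original evaluation order is preserved.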
index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item index")); ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("Span.get_Item ptrToSpan")); // Bounds check CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1); const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd); GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset); GenTree* boundsCheck = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, length, SCK_RNGCHK_FAIL); // Element access index = indexClone; #ifdef TARGET_64BIT if (index->OperGet() == GT_CNS_INT) { index->gtType = TYP_I_IMPL; } else { index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL); } #endif if (elemSize != 1) { GenTree* sizeofNode = gtNewIconNode(static_cast<ssize_t>(elemSize), TYP_I_IMPL); index = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, sizeofNode); } CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0); const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd); GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset); GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, index); // Prepare result var_types resultType = JITtype2varType(sig->retType); assert(resultType == result->TypeGet()); retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result); break; } case NI_System_RuntimeTypeHandle_GetValueInternal: { GenTree* op1 = impStackTop(0).val; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall())) { // Old tree // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle // // New tree // TreeToGetNativeTypeHandle // Remove call to helper and return the native TypeHandle pointer that was the parameter // to that helper. op1 = impPopStack().val; // Get native TypeHandle argument to old helper GenTreeCall::Use* arg = op1->AsCall()->gtCallArgs; assert(arg->GetNext() == nullptr); op1 = arg->GetNode(); retNode = op1; } // Call the regular function. break; } case NI_System_Type_GetTypeFromHandle: { GenTree* op1 = impStackTop(0).val; CorInfoHelpFunc typeHandleHelper; if (op1->gtOper == GT_CALL && (op1->AsCall()->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper)) { op1 = impPopStack().val; // Replace helper with a more specialized helper that returns RuntimeType if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE) { typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE; } else { assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL); typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL; } assert(op1->AsCall()->gtCallArgs->GetNext() == nullptr); op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->AsCall()->gtCallArgs); op1->gtType = TYP_REF; retNode = op1; } break; } case NI_System_Type_op_Equality: case NI_System_Type_op_Inequality: { JITDUMP("Importing Type.op_*Equality intrinsic\n"); GenTree* op1 = impStackTop(1).val; GenTree* op2 = impStackTop(0).val; GenTree* optTree = gtFoldTypeEqualityCall(ni == NI_System_Type_op_Equality, op1, op2); if (optTree != nullptr) { // Success, clean up the evaluation stack. impPopStack(); impPopStack(); // See if we can optimize even further, to a handle compare. optTree = gtFoldTypeCompare(optTree); // See if we can now fold a handle compare to a constant. 
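                    // e.g. typeof(int) == typeof(string) can fold all the way down to 'false'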
optTree = gtFoldExpr(optTree); retNode = optTree; } else { // Retry optimizing these later isSpecial = true; } break; } case NI_System_Enum_HasFlag: { GenTree* thisOp = impStackTop(1).val; GenTree* flagOp = impStackTop(0).val; GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp); if (optTree != nullptr) { // Optimization successful. Pop the stack for real. impPopStack(); impPopStack(); retNode = optTree; } else { // Retry optimizing this during morph. isSpecial = true; } break; } case NI_System_Type_IsAssignableFrom: { GenTree* typeTo = impStackTop(1).val; GenTree* typeFrom = impStackTop(0).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_IsAssignableTo: { GenTree* typeTo = impStackTop(0).val; GenTree* typeFrom = impStackTop(1).val; retNode = impTypeIsAssignable(typeTo, typeFrom); break; } case NI_System_Type_get_IsValueType: { // Optimize // // call Type.GetTypeFromHandle (which is replaced with CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE) // call Type.IsValueType // // to `true` or `false` // e.g. `typeof(int).IsValueType` => `true` if (impStackTop().val->IsCall()) { GenTreeCall* call = impStackTop().val->AsCall(); if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE)) { CORINFO_CLASS_HANDLE hClass = gtGetHelperArgClassHandle(call->gtCallArgs->GetNode()); if (hClass != NO_CLASS_HANDLE) { retNode = gtNewIconNode((eeIsValueClass(hClass) && // pointers are not value types (e.g. typeof(int*).IsValueType is false) info.compCompHnd->asCorInfoType(hClass) != CORINFO_TYPE_PTR) ? 1 : 0); impPopStack(); // drop CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE call } } } break; } case NI_System_Threading_Thread_get_ManagedThreadId: { if (impStackTop().val->OperIs(GT_RET_EXPR)) { GenTreeCall* call = impStackTop().val->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { if (lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Threading_Thread_get_CurrentThread) { // drop get_CurrentThread() call impPopStack(); call->ReplaceWith(gtNewNothingNode(), this); retNode = gtNewHelperCallNode(CORINFO_HELP_GETCURRENTMANAGEDTHREADID, TYP_INT); } } } break; } #ifdef TARGET_ARM64 // Intrinsify Interlocked.Or and Interlocked.And only for arm64-v8.1 (and newer) // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). case NI_System_Threading_Interlocked_Or: case NI_System_Threading_Interlocked_And: { if (compOpportunisticallyDependsOn(InstructionSet_Atomics)) { assert(sig->numArgs == 2); GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; genTreeOps op = (ni == NI_System_Threading_Interlocked_Or) ? 
GT_XORR : GT_XAND; retNode = gtNewOperNode(op, genActualType(callType), op1, op2); retNode->gtFlags |= GTF_GLOB_REF | GTF_ASG; } break; } #endif // TARGET_ARM64 #if defined(TARGET_XARCH) || defined(TARGET_ARM64) // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic case NI_System_Threading_Interlocked_CompareExchange: { var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } assert(callType != TYP_STRUCT); assert(sig->numArgs == 3); GenTree* op3 = impPopStack().val; // comparand GenTree* op2 = impPopStack().val; // value GenTree* op1 = impPopStack().val; // location GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3); node->AsCmpXchg()->gtOpLocation->gtFlags |= GTF_DONT_CSE; retNode = node; break; } case NI_System_Threading_Interlocked_Exchange: case NI_System_Threading_Interlocked_ExchangeAdd: { assert(callType != TYP_STRUCT); assert(sig->numArgs == 2); var_types retType = JITtype2varType(sig->retType); if ((retType == TYP_LONG) && (TARGET_POINTER_SIZE == 4)) { break; } if ((retType != TYP_INT) && (retType != TYP_LONG)) { break; } GenTree* op2 = impPopStack().val; GenTree* op1 = impPopStack().val; // This creates: // val // XAdd // addr // field (for example) // // In the case where the first argument is the address of a local, we might // want to make this *not* make the var address-taken -- but atomic instructions // on a local are probably pretty useless anyway, so we probably don't care. op1 = gtNewOperNode(ni == NI_System_Threading_Interlocked_ExchangeAdd ? GT_XADD : GT_XCHG, genActualType(callType), op1, op2); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; retNode = op1; break; } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) case NI_System_Threading_Interlocked_MemoryBarrier: case NI_System_Threading_Interlocked_ReadMemoryBarrier: { assert(sig->numArgs == 0); GenTree* op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID); op1->gtFlags |= GTF_GLOB_REF | GTF_ASG; // On XARCH `NI_System_Threading_Interlocked_ReadMemoryBarrier` fences need not be emitted. // However, we still need to capture the effect on reordering. 
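                // The GTF_MEMORYBARRIER_LOAD flag marks this as a load-only barrier: no fence
                // instruction is required on XARCH, but the node still blocks reordering in the JIT.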
if (ni == NI_System_Threading_Interlocked_ReadMemoryBarrier) { op1->gtFlags |= GTF_MEMORYBARRIER_LOAD; } retNode = op1; break; } #ifdef FEATURE_HW_INTRINSICS case NI_System_Math_FusedMultiplyAdd: { #ifdef TARGET_XARCH if (compExactlyDependsOn(InstructionSet_FMA) && supportSIMDTypes()) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return FMA.MultiplyAddScalar( // Vector128.CreateScalarUnsafe(x), // Vector128.CreateScalarUnsafe(y), // Vector128.CreateScalarUnsafe(z) // ).ToScalar(); GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD16, impPopStack().val, NI_Vector128_CreateScalarUnsafe, callJitType, 16); GenTree* res = gtNewSimdHWIntrinsicNode(TYP_SIMD16, op1, op2, op3, NI_FMA_MultiplyAddScalar, callJitType, 16); retNode = gtNewSimdHWIntrinsicNode(callType, res, NI_Vector128_ToScalar, callJitType, 16); break; } #elif defined(TARGET_ARM64) if (compExactlyDependsOn(InstructionSet_AdvSimd)) { assert(varTypeIsFloating(callType)); // We are constructing a chain of intrinsics similar to: // return AdvSimd.FusedMultiplyAddScalar( // Vector64.Create{ScalarUnsafe}(z), // Vector64.Create{ScalarUnsafe}(y), // Vector64.Create{ScalarUnsafe}(x) // ).ToScalar(); NamedIntrinsic createVector64 = (callType == TYP_DOUBLE) ? NI_Vector64_Create : NI_Vector64_CreateScalarUnsafe; constexpr unsigned int simdSize = 8; GenTree* op3 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op2 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); GenTree* op1 = gtNewSimdHWIntrinsicNode(TYP_SIMD8, impPopStack().val, createVector64, callJitType, simdSize); // Note that AdvSimd.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 + op2 * op3 // while Math{F}.FusedMultiplyAddScalar(op1,op2,op3) corresponds to op1 * op2 + op3 retNode = gtNewSimdHWIntrinsicNode(TYP_SIMD8, op3, op2, op1, NI_AdvSimd_FusedMultiplyAddScalar, callJitType, simdSize); retNode = gtNewSimdHWIntrinsicNode(callType, retNode, NI_Vector64_ToScalar, callJitType, simdSize); break; } #endif // TODO-CQ-XArch: Ideally we would create a GT_INTRINSIC node for fma, however, that currently // requires more extensive changes to valuenum to support methods with 3 operands // We want to generate a GT_INTRINSIC node in the case the call can't be treated as // a target intrinsic so that we can still benefit from CSE and constant folding. 
break; } #endif // FEATURE_HW_INTRINSICS case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: #ifdef TARGET_ARM64 // ARM64 has fmax/fmin which are IEEE754:2019 minimum/maximum compatible // TODO-XARCH-CQ: Enable this for XARCH when one of the arguments is a constant // so we can then emit maxss/minss and avoid NaN/-0.0 handling case NI_System_Math_Max: case NI_System_Math_Min: #endif case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { retNode = impMathIntrinsic(method, sig, callType, ni, tailCall); break; } case NI_System_Array_Clone: case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: case NI_System_Object_MemberwiseClone: case NI_System_Threading_Thread_get_CurrentThread: { // Flag for later handling. isSpecial = true; break; } case NI_System_Object_GetType: { JITDUMP("\n impIntrinsic: call to Object.GetType\n"); GenTree* op1 = impStackTop(0).val; // If we're calling GetType on a boxed value, just get the type directly. if (op1->IsBoxedValue()) { JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n"); // Try and clean up the box. Obtain the handle we // were going to pass to the newobj. GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE); if (boxTypeHandle != nullptr) { // Note we don't need to play the TYP_STRUCT games here like // do for LDTOKEN since the return value of this operator is Type, // not RuntimeTypeHandle. impPopStack(); GenTreeCall::Use* helperArgs = gtNewCallArgs(boxTypeHandle); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } // If we have a constrained callvirt with a "box this" transform // we know we have a value class and hence an exact type. // // If so, instead of boxing and then extracting the type, just // construct the type directly. if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) && (constraintCallThisTransform == CORINFO_BOX_THIS)) { // Ensure this is one of the is simple box cases (in particular, rule out nullables). 
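                    // Nullable<T> boxing uses a different helper, so requiring CORINFO_HELP_BOX
                    // here filters those cases out.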
const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass); const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX); if (isSafeToOptimize) { JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n"); impPopStack(); GenTree* typeHandleOp = impTokenToHandle(pConstrainedResolvedToken, nullptr, true /* mustRestoreHandle */); if (typeHandleOp == nullptr) { assert(compDonotInline()); return nullptr; } GenTreeCall::Use* helperArgs = gtNewCallArgs(typeHandleOp); GenTree* runtimeType = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs); retNode = runtimeType; } } #ifdef DEBUG if (retNode != nullptr) { JITDUMP("Optimized result for call to GetType is\n"); if (verbose) { gtDispTree(retNode); } } #endif // Else expand as an intrinsic, unless the call is constrained, // in which case we defer expansion to allow impImportCall do the // special constraint processing. if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr)) { JITDUMP("Expanding as special intrinsic\n"); impPopStack(); op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, ni, method); // Set the CALL flag to indicate that the operator is implemented by a call. // Set also the EXCEPTION flag because the native implementation of // NI_System_Object_GetType intrinsic can throw NullReferenceException. op1->gtFlags |= (GTF_CALL | GTF_EXCEPT); retNode = op1; // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } if (retNode == nullptr) { JITDUMP("Leaving as normal call\n"); // Might be further optimizable, so arrange to leave a mark behind isSpecial = true; } break; } case NI_System_Array_GetLength: case NI_System_Array_GetLowerBound: case NI_System_Array_GetUpperBound: { // System.Array.GetLength(Int32) method: // public int GetLength(int dimension) // System.Array.GetLowerBound(Int32) method: // public int GetLowerBound(int dimension) // System.Array.GetUpperBound(Int32) method: // public int GetUpperBound(int dimension) // // Only implement these as intrinsics for multi-dimensional arrays. // Only handle constant dimension arguments. GenTree* gtDim = impStackTop().val; GenTree* gtArr = impStackTop(1).val; if (gtDim->IsIntegralConst()) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE arrCls = gtGetClassHandle(gtArr, &isExact, &isNonNull); if (arrCls != NO_CLASS_HANDLE) { unsigned rank = info.compCompHnd->getArrayRank(arrCls); if ((rank > 1) && !info.compCompHnd->isSDArray(arrCls)) { // `rank` is guaranteed to be <=32 (see MAX_RANK in vm\array.h). Any constant argument // is `int` sized. INT64 dimValue = gtDim->AsIntConCommon()->IntegralValue(); assert((unsigned int)dimValue == dimValue); unsigned dim = (unsigned int)dimValue; if (dim < rank) { // This is now known to be a multi-dimension array with a constant dimension // that is in range; we can expand it as an intrinsic. impPopStack().val; // Pop the dim and array object; we already have a pointer to them. impPopStack().val; // Make sure there are no global effects in the array (such as it being a function // call), so we can mark the generated indirection with GTF_IND_INVARIANT. In the // GetUpperBound case we need the cloned object, since we refer to the array // object twice. In the other cases, we don't need to clone. 
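// Sketch of the expansion (illustrative): for 'int[,] a', a call such as 'a.GetLength(0)' with a
// constant dimension becomes a single invariant TYP_INT load at a fixed offset into the array
// object, computed below via eeGetMDArrayLengthOffset(rank, dim).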
GenTree* gtArrClone = nullptr; if (((gtArr->gtFlags & GTF_GLOB_EFFECT) != 0) || (ni == NI_System_Array_GetUpperBound)) { gtArr = impCloneExpr(gtArr, &gtArrClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("MD intrinsics array")); } switch (ni) { case NI_System_Array_GetLength: { // Generate *(array + offset-to-length-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLengthOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetLowerBound: { // Generate *(array + offset-to-bounds-array + sizeof(int) * dim) unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); retNode = gtNewIndir(TYP_INT, gtAddr); retNode->gtFlags |= GTF_IND_INVARIANT; break; } case NI_System_Array_GetUpperBound: { assert(gtArrClone != nullptr); // Generate: // *(array + offset-to-length-array + sizeof(int) * dim) + // *(array + offset-to-bounds-array + sizeof(int) * dim) - 1 unsigned offs = eeGetMDArrayLowerBoundOffset(rank, dim); GenTree* gtOffs = gtNewIconNode(offs, TYP_I_IMPL); GenTree* gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArr, gtOffs); GenTree* gtLowerBound = gtNewIndir(TYP_INT, gtAddr); gtLowerBound->gtFlags |= GTF_IND_INVARIANT; offs = eeGetMDArrayLengthOffset(rank, dim); gtOffs = gtNewIconNode(offs, TYP_I_IMPL); gtAddr = gtNewOperNode(GT_ADD, TYP_BYREF, gtArrClone, gtOffs); GenTree* gtLength = gtNewIndir(TYP_INT, gtAddr); gtLength->gtFlags |= GTF_IND_INVARIANT; GenTree* gtSum = gtNewOperNode(GT_ADD, TYP_INT, gtLowerBound, gtLength); GenTree* gtOne = gtNewIconNode(1, TYP_INT); retNode = gtNewOperNode(GT_SUB, TYP_INT, gtSum, gtOne); break; } default: unreached(); } } } } } break; } case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness: { assert(sig->numArgs == 1); // We expect the return type of the ReverseEndianness routine to match the type of the // one and only argument to the method. We use a special instruction for 16-bit // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally, // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a // 64-bit byte swap on a 32-bit arch, we'll fall to the default case in the switch block below. switch (sig->retType) { case CorInfoType::CORINFO_TYPE_SHORT: case CorInfoType::CORINFO_TYPE_USHORT: retNode = gtNewCastNode(TYP_INT, gtNewOperNode(GT_BSWAP16, TYP_INT, impPopStack().val), false, callType); break; case CorInfoType::CORINFO_TYPE_INT: case CorInfoType::CORINFO_TYPE_UINT: #ifdef TARGET_64BIT case CorInfoType::CORINFO_TYPE_LONG: case CorInfoType::CORINFO_TYPE_ULONG: #endif // TARGET_64BIT retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val); break; default: // This default case gets hit on 32-bit archs when a call to a 64-bit overload // of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard // method call, where the implementation decomposes the operation into two 32-bit // bswap routines. If the input to the 64-bit function is a constant, then we rely // on inlining + constant folding of 32-bit bswaps to effectively constant fold // the 64-bit call site. 
break; } break; } // Fold PopCount for constant input case NI_System_Numerics_BitOperations_PopCount: { assert(sig->numArgs == 1); if (impStackTop().val->IsIntegralConst()) { typeInfo argType = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); INT64 cns = impPopStack().val->AsIntConCommon()->IntegralValue(); if (argType.IsType(TI_LONG)) { retNode = gtNewIconNode(genCountBits(cns), callType); } else { assert(argType.IsType(TI_INT)); retNode = gtNewIconNode(genCountBits(static_cast<unsigned>(cns)), callType); } } break; } case NI_System_GC_KeepAlive: { retNode = impKeepAliveIntrinsic(impPopStack().val); break; } default: break; } } if (mustExpand && (retNode == nullptr)) { assert(!"Unhandled must expand intrinsic, throwing PlatformNotSupportedException"); return impUnsupportedNamedIntrinsic(CORINFO_HELP_THROW_PLATFORM_NOT_SUPPORTED, method, sig, mustExpand); } // Optionally report if this intrinsic is special // (that is, potentially re-optimizable during morph). if (isSpecialIntrinsic != nullptr) { *isSpecialIntrinsic = isSpecial; } return retNode; } GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) { // Optimize patterns like: // // typeof(TTo).IsAssignableFrom(typeof(TTFrom)) // valueTypeVar.GetType().IsAssignableFrom(typeof(TTFrom)) // typeof(TTFrom).IsAssignableTo(typeof(TTo)) // typeof(TTFrom).IsAssignableTo(valueTypeVar.GetType()) // // to true/false if (typeTo->IsCall() && typeFrom->IsCall()) { // make sure both arguments are `typeof()` CORINFO_METHOD_HANDLE hTypeof = eeFindHelper(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE); if ((typeTo->AsCall()->gtCallMethHnd == hTypeof) && (typeFrom->AsCall()->gtCallMethHnd == hTypeof)) { CORINFO_CLASS_HANDLE hClassTo = gtGetHelperArgClassHandle(typeTo->AsCall()->gtCallArgs->GetNode()); CORINFO_CLASS_HANDLE hClassFrom = gtGetHelperArgClassHandle(typeFrom->AsCall()->gtCallArgs->GetNode()); if (hClassTo == NO_CLASS_HANDLE || hClassFrom == NO_CLASS_HANDLE) { return nullptr; } TypeCompareState castResult = info.compCompHnd->compareTypesForCast(hClassFrom, hClassTo); if (castResult == TypeCompareState::May) { // requires runtime check // e.g. __Canon, COMObjects, Nullable return nullptr; } GenTreeIntCon* retNode = gtNewIconNode((castResult == TypeCompareState::Must) ? 1 : 0); impPopStack(); // drop both CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE calls impPopStack(); return retNode; } } return nullptr; } GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, var_types callType, NamedIntrinsic intrinsicName, bool tailCall) { GenTree* op1; GenTree* op2; assert(callType != TYP_STRUCT); assert(IsMathIntrinsic(intrinsicName)); op1 = nullptr; #if !defined(TARGET_X86) // Intrinsics that are not implemented directly by target instructions will // be re-materialized as users calls in rationalizer. For prefixed tail calls, // don't do this optimization, because // a) For back compatibility reasons on desktop .NET Framework 4.6 / 4.6.1 // b) It will be non-trivial task or too late to re-materialize a surviving // tail prefixed GT_INTRINSIC as tail call in rationalizer. if (!IsIntrinsicImplementedByUserCall(intrinsicName) || !tailCall) #else // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad // code generation for certain EH constructs. 
if (!IsIntrinsicImplementedByUserCall(intrinsicName)) #endif { CORINFO_CLASS_HANDLE tmpClass; CORINFO_ARG_LIST_HANDLE arg; var_types op1Type; var_types op2Type; switch (sig->numArgs) { case 1: op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicName, method); break; case 2: op2 = impPopStack().val; op1 = impPopStack().val; arg = sig->args; op1Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op1->TypeGet() != genActualType(op1Type)) { assert(varTypeIsFloating(op1)); op1 = gtNewCastNode(callType, op1, false, callType); } arg = info.compCompHnd->getArgNext(arg); op2Type = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg, &tmpClass))); if (op2->TypeGet() != genActualType(op2Type)) { assert(varTypeIsFloating(op2)); op2 = gtNewCastNode(callType, op2, false, callType); } op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicName, method); break; default: NO_WAY("Unsupported number of args for Math Intrinsic"); } if (IsIntrinsicImplementedByUserCall(intrinsicName)) { op1->gtFlags |= GTF_CALL; } } return op1; } //------------------------------------------------------------------------ // lookupNamedIntrinsic: map method to jit named intrinsic value // // Arguments: // method -- method handle for method // // Return Value: // Id for the named intrinsic, or Illegal if none. // // Notes: // method should have CORINFO_FLG_INTRINSIC set in its attributes, // otherwise it is not a named jit intrinsic. 
// NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method) { const char* className = nullptr; const char* namespaceName = nullptr; const char* enclosingClassName = nullptr; const char* methodName = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName, &enclosingClassName); JITDUMP("Named Intrinsic "); if (namespaceName != nullptr) { JITDUMP("%s.", namespaceName); } if (enclosingClassName != nullptr) { JITDUMP("%s.", enclosingClassName); } if (className != nullptr) { JITDUMP("%s.", className); } if (methodName != nullptr) { JITDUMP("%s", methodName); } if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr)) { // Check if we are dealing with an MD array's known runtime method CorInfoArrayIntrinsic arrayFuncIndex = info.compCompHnd->getArrayIntrinsicID(method); switch (arrayFuncIndex) { case CorInfoArrayIntrinsic::GET: JITDUMP("ARRAY_FUNC_GET: Recognized\n"); return NI_Array_Get; case CorInfoArrayIntrinsic::SET: JITDUMP("ARRAY_FUNC_SET: Recognized\n"); return NI_Array_Set; case CorInfoArrayIntrinsic::ADDRESS: JITDUMP("ARRAY_FUNC_ADDRESS: Recognized\n"); return NI_Array_Address; default: break; } JITDUMP(": Not recognized, not enough metadata\n"); return NI_Illegal; } JITDUMP(": "); NamedIntrinsic result = NI_Illegal; if (strcmp(namespaceName, "System") == 0) { if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0)) { result = NI_System_Enum_HasFlag; } else if (strcmp(className, "Activator") == 0) { if (strcmp(methodName, "AllocatorOf") == 0) { result = NI_System_Activator_AllocatorOf; } else if (strcmp(methodName, "DefaultConstructorOf") == 0) { result = NI_System_Activator_DefaultConstructorOf; } } else if (strcmp(className, "ByReference`1") == 0) { if (strcmp(methodName, ".ctor") == 0) { result = NI_System_ByReference_ctor; } else if (strcmp(methodName, "get_Value") == 0) { result = NI_System_ByReference_get_Value; } } else if (strcmp(className, "Math") == 0 || strcmp(className, "MathF") == 0) { if (strcmp(methodName, "Abs") == 0) { result = NI_System_Math_Abs; } else if (strcmp(methodName, "Acos") == 0) { result = NI_System_Math_Acos; } else if (strcmp(methodName, "Acosh") == 0) { result = NI_System_Math_Acosh; } else if (strcmp(methodName, "Asin") == 0) { result = NI_System_Math_Asin; } else if (strcmp(methodName, "Asinh") == 0) { result = NI_System_Math_Asinh; } else if (strcmp(methodName, "Atan") == 0) { result = NI_System_Math_Atan; } else if (strcmp(methodName, "Atanh") == 0) { result = NI_System_Math_Atanh; } else if (strcmp(methodName, "Atan2") == 0) { result = NI_System_Math_Atan2; } else if (strcmp(methodName, "Cbrt") == 0) { result = NI_System_Math_Cbrt; } else if (strcmp(methodName, "Ceiling") == 0) { result = NI_System_Math_Ceiling; } else if (strcmp(methodName, "Cos") == 0) { result = NI_System_Math_Cos; } else if (strcmp(methodName, "Cosh") == 0) { result = NI_System_Math_Cosh; } else if (strcmp(methodName, "Exp") == 0) { result = NI_System_Math_Exp; } else if (strcmp(methodName, "Floor") == 0) { result = NI_System_Math_Floor; } else if (strcmp(methodName, "FMod") == 0) { result = NI_System_Math_FMod; } else if (strcmp(methodName, "FusedMultiplyAdd") == 0) { result = NI_System_Math_FusedMultiplyAdd; } else if (strcmp(methodName, "ILogB") == 0) { result = NI_System_Math_ILogB; } else if (strcmp(methodName, "Log") == 0) { result = NI_System_Math_Log; } else if (strcmp(methodName, "Log2") == 0) { result = NI_System_Math_Log2; } else if (strcmp(methodName, "Log10") == 0) { result = 
NI_System_Math_Log10; } else if (strcmp(methodName, "Max") == 0) { result = NI_System_Math_Max; } else if (strcmp(methodName, "Min") == 0) { result = NI_System_Math_Min; } else if (strcmp(methodName, "Pow") == 0) { result = NI_System_Math_Pow; } else if (strcmp(methodName, "Round") == 0) { result = NI_System_Math_Round; } else if (strcmp(methodName, "Sin") == 0) { result = NI_System_Math_Sin; } else if (strcmp(methodName, "Sinh") == 0) { result = NI_System_Math_Sinh; } else if (strcmp(methodName, "Sqrt") == 0) { result = NI_System_Math_Sqrt; } else if (strcmp(methodName, "Tan") == 0) { result = NI_System_Math_Tan; } else if (strcmp(methodName, "Tanh") == 0) { result = NI_System_Math_Tanh; } else if (strcmp(methodName, "Truncate") == 0) { result = NI_System_Math_Truncate; } } else if (strcmp(className, "GC") == 0) { if (strcmp(methodName, "KeepAlive") == 0) { result = NI_System_GC_KeepAlive; } } else if (strcmp(className, "Array") == 0) { if (strcmp(methodName, "Clone") == 0) { result = NI_System_Array_Clone; } else if (strcmp(methodName, "GetLength") == 0) { result = NI_System_Array_GetLength; } else if (strcmp(methodName, "GetLowerBound") == 0) { result = NI_System_Array_GetLowerBound; } else if (strcmp(methodName, "GetUpperBound") == 0) { result = NI_System_Array_GetUpperBound; } } else if (strcmp(className, "Object") == 0) { if (strcmp(methodName, "MemberwiseClone") == 0) { result = NI_System_Object_MemberwiseClone; } else if (strcmp(methodName, "GetType") == 0) { result = NI_System_Object_GetType; } else if (strcmp(methodName, "MethodTableOf") == 0) { result = NI_System_Object_MethodTableOf; } } else if (strcmp(className, "RuntimeTypeHandle") == 0) { if (strcmp(methodName, "GetValueInternal") == 0) { result = NI_System_RuntimeTypeHandle_GetValueInternal; } } else if (strcmp(className, "Type") == 0) { if (strcmp(methodName, "get_IsValueType") == 0) { result = NI_System_Type_get_IsValueType; } else if (strcmp(methodName, "IsAssignableFrom") == 0) { result = NI_System_Type_IsAssignableFrom; } else if (strcmp(methodName, "IsAssignableTo") == 0) { result = NI_System_Type_IsAssignableTo; } else if (strcmp(methodName, "op_Equality") == 0) { result = NI_System_Type_op_Equality; } else if (strcmp(methodName, "op_Inequality") == 0) { result = NI_System_Type_op_Inequality; } else if (strcmp(methodName, "GetTypeFromHandle") == 0) { result = NI_System_Type_GetTypeFromHandle; } } else if (strcmp(className, "String") == 0) { if (strcmp(methodName, "Equals") == 0) { result = NI_System_String_Equals; } else if (strcmp(methodName, "get_Chars") == 0) { result = NI_System_String_get_Chars; } else if (strcmp(methodName, "get_Length") == 0) { result = NI_System_String_get_Length; } else if (strcmp(methodName, "op_Implicit") == 0) { result = NI_System_String_op_Implicit; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_String_StartsWith; } } else if (strcmp(className, "MemoryExtensions") == 0) { if (strcmp(methodName, "AsSpan") == 0) { result = NI_System_MemoryExtensions_AsSpan; } if (strcmp(methodName, "SequenceEqual") == 0) { result = NI_System_MemoryExtensions_SequenceEqual; } else if (strcmp(methodName, "Equals") == 0) { result = NI_System_MemoryExtensions_Equals; } else if (strcmp(methodName, "StartsWith") == 0) { result = NI_System_MemoryExtensions_StartsWith; } } else if (strcmp(className, "Span`1") == 0) { if (strcmp(methodName, "get_Item") == 0) { result = NI_System_Span_get_Item; } } else if (strcmp(className, "ReadOnlySpan`1") == 0) { if (strcmp(methodName, "get_Item") == 0) 
{ result = NI_System_ReadOnlySpan_get_Item; } } else if (strcmp(className, "EETypePtr") == 0) { if (strcmp(methodName, "EETypePtrOf") == 0) { result = NI_System_EETypePtr_EETypePtrOf; } } } else if (strcmp(namespaceName, "System.Threading") == 0) { if (strcmp(className, "Thread") == 0) { if (strcmp(methodName, "get_CurrentThread") == 0) { result = NI_System_Threading_Thread_get_CurrentThread; } else if (strcmp(methodName, "get_ManagedThreadId") == 0) { result = NI_System_Threading_Thread_get_ManagedThreadId; } } else if (strcmp(className, "Interlocked") == 0) { #ifndef TARGET_ARM64 // TODO-CQ: Implement for XArch (https://github.com/dotnet/runtime/issues/32239). if (strcmp(methodName, "And") == 0) { result = NI_System_Threading_Interlocked_And; } else if (strcmp(methodName, "Or") == 0) { result = NI_System_Threading_Interlocked_Or; } #endif if (strcmp(methodName, "CompareExchange") == 0) { result = NI_System_Threading_Interlocked_CompareExchange; } else if (strcmp(methodName, "Exchange") == 0) { result = NI_System_Threading_Interlocked_Exchange; } else if (strcmp(methodName, "ExchangeAdd") == 0) { result = NI_System_Threading_Interlocked_ExchangeAdd; } else if (strcmp(methodName, "MemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_MemoryBarrier; } else if (strcmp(methodName, "ReadMemoryBarrier") == 0) { result = NI_System_Threading_Interlocked_ReadMemoryBarrier; } } } #if defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Buffers.Binary") == 0) { if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0)) { result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness; } } #endif // defined(TARGET_XARCH) || defined(TARGET_ARM64) else if (strcmp(namespaceName, "System.Collections.Generic") == 0) { if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_EqualityComparer_get_Default; } else if ((strcmp(className, "Comparer`1") == 0) && (strcmp(methodName, "get_Default") == 0)) { result = NI_System_Collections_Generic_Comparer_get_Default; } } else if ((strcmp(namespaceName, "System.Numerics") == 0) && (strcmp(className, "BitOperations") == 0)) { if (strcmp(methodName, "PopCount") == 0) { result = NI_System_Numerics_BitOperations_PopCount; } } #ifdef FEATURE_HW_INTRINSICS else if (strcmp(namespaceName, "System.Numerics") == 0) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); int sizeOfVectorT = getSIMDVectorRegisterByteLength(); result = SimdAsHWIntrinsicInfo::lookupId(&sig, className, methodName, enclosingClassName, sizeOfVectorT); } #endif // FEATURE_HW_INTRINSICS else if ((strcmp(namespaceName, "System.Runtime.CompilerServices") == 0) && (strcmp(className, "RuntimeHelpers") == 0)) { if (strcmp(methodName, "CreateSpan") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_CreateSpan; } else if (strcmp(methodName, "InitializeArray") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_InitializeArray; } else if (strcmp(methodName, "IsKnownConstant") == 0) { result = NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant; } } else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0) { // We go down this path even when FEATURE_HW_INTRINSICS isn't enabled // so we can specially handle IsSupported and recursive calls. // This is required to appropriately handle the intrinsics on platforms // which don't support them. 
On such a platform methods like Vector64.Create // will be seen as `Intrinsic` and `mustExpand` due to having a code path // which is recursive. When such a path is hit we expect it to be handled by // the importer and we fire an assert if it wasn't and in previous versions // of the JIT would fail fast. This was changed to throw a PNSE instead but // we still assert as most intrinsics should have been recognized/handled. // In order to avoid the assert, we specially handle the IsSupported checks // (to better allow dead-code optimizations) and we explicitly throw a PNSE // as we know that is the desired behavior for the HWIntrinsics when not // supported. For cases like Vector64.Create, this is fine because it will // be behind a relevant IsSupported check and will never be hit and the // software fallback will be executed instead. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef FEATURE_HW_INTRINSICS namespaceName += 25; const char* platformNamespaceName; #if defined(TARGET_XARCH) platformNamespaceName = ".X86"; #elif defined(TARGET_ARM64) platformNamespaceName = ".Arm"; #else #error Unsupported platform #endif if ((namespaceName[0] == '\0') || (strcmp(namespaceName, platformNamespaceName) == 0)) { CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(method, &sig); result = HWIntrinsicInfo::lookupId(this, &sig, className, methodName, enclosingClassName); } #endif // FEATURE_HW_INTRINSICS if (result == NI_Illegal) { if ((strcmp(methodName, "get_IsSupported") == 0) || (strcmp(methodName, "get_IsHardwareAccelerated") == 0)) { // This allows the relevant code paths to be dropped as dead code even // on platforms where FEATURE_HW_INTRINSICS is not supported. result = NI_IsSupported_False; } else if (gtIsRecursiveCall(method)) { // For the framework itself, any recursive intrinsics will either be // only supported on a single platform or will be guarded by a relevant // IsSupported check so the throw PNSE will be valid or dropped. result = NI_Throw_PlatformNotSupportedException; } } } else if (strcmp(namespaceName, "System.StubHelpers") == 0) { if (strcmp(className, "StubHelpers") == 0) { if (strcmp(methodName, "GetStubContext") == 0) { result = NI_System_StubHelpers_GetStubContext; } else if (strcmp(methodName, "NextCallReturnAddress") == 0) { result = NI_System_StubHelpers_NextCallReturnAddress; } } } if (result == NI_Illegal) { JITDUMP("Not recognized\n"); } else if (result == NI_IsSupported_False) { JITDUMP("Unsupported - return false"); } else if (result == NI_Throw_PlatformNotSupportedException) { JITDUMP("Unsupported - throw PlatformNotSupportedException"); } else { JITDUMP("Recognized\n"); } return result; } //------------------------------------------------------------------------ // impUnsupportedNamedIntrinsic: Throws an exception for an unsupported named intrinsic // // Arguments: // helper - JIT helper ID for the exception to be thrown // method - method handle of the intrinsic function. // sig - signature of the intrinsic call // mustExpand - true if the intrinsic must return a GenTree*; otherwise, false // // Return Value: // a gtNewMustThrowException if mustExpand is true; otherwise, nullptr // GenTree* Compiler::impUnsupportedNamedIntrinsic(unsigned helper, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig, bool mustExpand) { // We've hit some error case and may need to return a node for the given error. // // When `mustExpand=false`, we are attempting to inline the intrinsic directly into another method. 
In this // scenario, we need to return `nullptr` so that a GT_CALL to the intrinsic is emitted instead. This is to // ensure that everything continues to behave correctly when optimizations are enabled (e.g. things like the // inliner may expect the node we return to have a certain signature, and the `MustThrowException` node won't // match that). // // When `mustExpand=true`, we are in a GT_CALL to the intrinsic and are attempting to JIT it. This will generally // be in response to an indirect call (e.g. done via reflection) or in response to an earlier attempt returning // `nullptr` (under `mustExpand=false`). In that scenario, we are safe to return the `MustThrowException` node. if (mustExpand) { for (unsigned i = 0; i < sig->numArgs; i++) { impPopStack(); } return gtNewMustThrowException(helper, JITtype2varType(sig->retType), sig->retTypeClass); } else { return nullptr; } } /*****************************************************************************/ GenTree* Compiler::impArrayAccessIntrinsic( CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, NamedIntrinsic intrinsicName) { /* If we are generating SMALL_CODE, we don't want to use intrinsics for the following, as it generates fatter code. */ if (compCodeOpt() == SMALL_CODE) { return nullptr; } /* These intrinsics generate fatter (but faster) code and are only done if we don't need SMALL_CODE */ unsigned rank = (intrinsicName == NI_Array_Set) ? (sig->numArgs - 1) : sig->numArgs; // The rank 1 case is special because it has to handle two array formats // we will simply not do that case if (rank > GT_ARR_MAX_RANK || rank <= 1) { return nullptr; } CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr; var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd)); // For the ref case, we will only be able to inline if the types match // (verifier checks for this, we don't care for the nonverified case and the // type is final (so we don't need to do the cast) if ((intrinsicName != NI_Array_Get) && !readonlyCall && varTypeIsGC(elemType)) { // Get the call site signature CORINFO_SIG_INFO LocalSig; eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig); assert(LocalSig.hasThis()); CORINFO_CLASS_HANDLE actualElemClsHnd; if (intrinsicName == NI_Array_Set) { // Fetch the last argument, the one that indicates the type we are setting. CORINFO_ARG_LIST_HANDLE argType = LocalSig.args; for (unsigned r = 0; r < rank; r++) { argType = info.compCompHnd->getArgNext(argType); } typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType); actualElemClsHnd = argInfo.GetClassHandle(); } else { assert(intrinsicName == NI_Array_Address); // Fetch the return type typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass); assert(retInfo.IsByRef()); actualElemClsHnd = retInfo.GetClassHandle(); } // if it's not final, we can't do the optimization if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL)) { return nullptr; } } unsigned arrayElemSize; if (elemType == TYP_STRUCT) { assert(arrElemClsHnd); arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd); } else { arrayElemSize = genTypeSize(elemType); } if ((unsigned char)arrayElemSize != arrayElemSize) { // arrayElemSize would be truncated as an unsigned char. // This means the array element is too large. Don't do the optimization. 
return nullptr; } GenTree* val = nullptr; if (intrinsicName == NI_Array_Set) { // Assignment of a struct is more work, and there are more gets than sets. if (elemType == TYP_STRUCT) { return nullptr; } val = impPopStack().val; assert(genActualType(elemType) == genActualType(val->gtType) || (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) || (elemType == TYP_INT && val->gtType == TYP_BYREF) || (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT)); } noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK); GenTree* inds[GT_ARR_MAX_RANK]; for (unsigned k = rank; k > 0; k--) { inds[k - 1] = impPopStack().val; } GenTree* arr = impPopStack().val; assert(arr->gtType == TYP_REF); GenTree* arrElem = new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank), static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]); if (intrinsicName != NI_Array_Address) { if (varTypeIsStruct(elemType)) { arrElem = gtNewObjNode(sig->retTypeClass, arrElem); } else { arrElem = gtNewOperNode(GT_IND, elemType, arrElem); } } if (intrinsicName == NI_Array_Set) { assert(val != nullptr); return gtNewAssignNode(arrElem, val); } else { return arrElem; } } //------------------------------------------------------------------------ // impKeepAliveIntrinsic: Import the GC.KeepAlive intrinsic call // // Imports the intrinsic as a GT_KEEPALIVE node, and, as an optimization, // if the object to keep alive is a GT_BOX, removes its side effects and // uses the address of a local (copied from the box's source if needed) // as the operand for GT_KEEPALIVE. For the BOX optimization, if the class // of the box has no GC fields, a GT_NOP is returned. // // Arguments: // objToKeepAlive - the intrinisic call's argument // // Return Value: // The imported GT_KEEPALIVE or GT_NOP - see description. 
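//
// Example (illustrative): GC.KeepAlive on a boxed struct with no GC fields imports as a GT_NOP,
// while a boxed struct with GC fields keeps the address of the (copied) unboxed local alive.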
// GenTree* Compiler::impKeepAliveIntrinsic(GenTree* objToKeepAlive) { assert(objToKeepAlive->TypeIs(TYP_REF)); if (opts.OptimizationEnabled() && objToKeepAlive->IsBoxedValue()) { CORINFO_CLASS_HANDLE boxedClass = lvaGetDesc(objToKeepAlive->AsBox()->BoxOp()->AsLclVar())->lvClassHnd; ClassLayout* layout = typGetObjLayout(boxedClass); if (!layout->HasGCPtr()) { gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_AND_NARROW); JITDUMP("\nBOX class has no GC fields, KEEPALIVE is a NOP"); return gtNewNothingNode(); } GenTree* boxSrc = gtTryRemoveBoxUpstreamEffects(objToKeepAlive, BR_REMOVE_BUT_NOT_NARROW); if (boxSrc != nullptr) { unsigned boxTempNum; if (boxSrc->OperIs(GT_LCL_VAR)) { boxTempNum = boxSrc->AsLclVarCommon()->GetLclNum(); } else { boxTempNum = lvaGrabTemp(true DEBUGARG("Temp for the box source")); GenTree* boxTempAsg = gtNewTempAssign(boxTempNum, boxSrc); Statement* boxAsgStmt = objToKeepAlive->AsBox()->gtCopyStmtWhenInlinedBoxValue; boxAsgStmt->SetRootNode(boxTempAsg); } JITDUMP("\nImporting KEEPALIVE(BOX) as KEEPALIVE(ADDR(LCL_VAR V%02u))", boxTempNum); GenTree* boxTemp = gtNewLclvNode(boxTempNum, boxSrc->TypeGet()); GenTree* boxTempAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, boxTemp); return gtNewKeepAliveNode(boxTempAddr); } } return gtNewKeepAliveNode(objToKeepAlive); } bool Compiler::verMergeEntryStates(BasicBlock* block, bool* changed) { unsigned i; // do some basic checks first if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth) { return false; } if (verCurrentState.esStackDepth > 0) { // merge stack types StackEntry* parentStack = block->bbStackOnEntry(); StackEntry* childStack = verCurrentState.esStack; for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++) { if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == false) { return false; } } } // merge initialization status of this ptr if (verTrackObjCtorInitState) { // If we're tracking the CtorInitState, then it must not be unknown in the current state. assert(verCurrentState.thisInitialized != TIS_Bottom); // If the successor block's thisInit state is unknown, copy it from the current state. if (block->bbThisOnEntry() == TIS_Bottom) { *changed = true; verSetThisInit(block, verCurrentState.thisInitialized); } else if (verCurrentState.thisInitialized != block->bbThisOnEntry()) { if (block->bbThisOnEntry() != TIS_Top) { *changed = true; verSetThisInit(block, TIS_Top); if (block->bbFlags & BBF_FAILED_VERIFICATION) { // The block is bad. Control can flow through the block to any handler that catches the // verification exception, but the importer ignores bad blocks and therefore won't model // this flow in the normal way. To complete the merge into the bad block, the new state // needs to be manually pushed to the handlers that may be reached after the verification // exception occurs. // // Usually, the new state was already propagated to the relevant handlers while processing // the predecessors of the bad block. The exception is when the bad block is at the start // of a try region, meaning it is protected by additional handlers that do not protect its // predecessors. // if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0)) { // Push TIS_Top to the handlers that protect the bad block. Note that this can cause // recursive calls back into this code path (if successors of the current bad block are // also bad blocks). 
// ThisInitState origTIS = verCurrentState.thisInitialized; verCurrentState.thisInitialized = TIS_Top; impVerifyEHBlock(block, true); verCurrentState.thisInitialized = origTIS; } } } } else { assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom); } return true; } /***************************************************************************** * 'logMsg' is true if a log message needs to be logged. false if the caller has * already logged it (presumably in a more detailed fashion than done here) */ void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { block->bbJumpKind = BBJ_THROW; block->bbFlags |= BBF_FAILED_VERIFICATION; block->bbFlags &= ~BBF_IMPORTED; impCurStmtOffsSet(block->bbCodeOffs); // Clear the statement list as it exists so far; we're only going to have a verification exception. impStmtList = impLastStmt = nullptr; #ifdef DEBUG if (logMsg) { JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName, block->bbCodeOffs, block->bbCodeOffsEnd)); if (verbose) { printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs); } } if (JitConfig.DebugBreakOnVerificationFailure()) { DebugBreak(); } #endif impBeginTreeList(); // if the stack is non-empty evaluate all the side-effects if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); GenTree* op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewCallArgs(gtNewIconNode(block->bbCodeOffs))); // verCurrentState.esStackDepth = 0; impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // The inliner is not able to handle methods that require a throw block, so // make sure this method never gets inlined. 
info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE); } /***************************************************************************** * */ void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg)) { verResetCurrentState(block, &verCurrentState); verConvertBBToThrowVerificationException(block DEBUGARG(logMsg)); #ifdef DEBUG impNoteLastILoffs(); // Remember at which BC offset the tree was finished #endif // DEBUG } /******************************************************************************/ typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd) { assert(ciType < CORINFO_TYPE_COUNT); typeInfo tiResult; switch (ciType) { case CORINFO_TYPE_STRING: case CORINFO_TYPE_CLASS: tiResult = verMakeTypeInfo(clsHnd); if (!tiResult.IsType(TI_REF)) { // type must be consistent with element type return typeInfo(); } break; #ifdef TARGET_64BIT case CORINFO_TYPE_NATIVEINT: case CORINFO_TYPE_NATIVEUINT: if (clsHnd) { // If we have more precise information, use it return verMakeTypeInfo(clsHnd); } else { return typeInfo::nativeInt(); } break; #endif // TARGET_64BIT case CORINFO_TYPE_VALUECLASS: case CORINFO_TYPE_REFANY: tiResult = verMakeTypeInfo(clsHnd); // type must be constant with element type; if (!tiResult.IsValueClass()) { return typeInfo(); } break; case CORINFO_TYPE_VAR: return verMakeTypeInfo(clsHnd); case CORINFO_TYPE_PTR: // for now, pointers are treated as an error case CORINFO_TYPE_VOID: return typeInfo(); break; case CORINFO_TYPE_BYREF: { CORINFO_CLASS_HANDLE childClassHandle; CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle); return ByRef(verMakeTypeInfo(childType, childClassHandle)); } break; default: if (clsHnd) { // If we have more precise information, use it return typeInfo(TI_STRUCT, clsHnd); } else { return typeInfo(JITtype2tiType(ciType)); } } return tiResult; } /******************************************************************************/ typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */) { if (clsHnd == nullptr) { return typeInfo(); } // Byrefs should only occur in method and local signatures, which are accessed // using ICorClassInfo and ICorClassInfo.getChildType. // So findClass() and getClassAttribs() should not be called for byrefs if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF) { assert(!"Did findClass() return a Byref?"); return typeInfo(); } unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd); if (attribs & CORINFO_FLG_VALUECLASS) { CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd); // Meta-data validation should ensure that CORINF_TYPE_BYREF should // not occur here, so we may want to change this to an assert instead. if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR) { return typeInfo(); } #ifdef TARGET_64BIT if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT) { return typeInfo::nativeInt(); } #endif // TARGET_64BIT if (t != CORINFO_TYPE_UNDEF) { return (typeInfo(JITtype2tiType(t))); } else if (bashStructToRef) { return (typeInfo(TI_REF, clsHnd)); } else { return (typeInfo(TI_STRUCT, clsHnd)); } } else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE) { // See comment in _typeInfo.h for why we do it this way. 
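// (Generic type variables are treated as object references for verification purposes; the extra
// 'true' constructor argument tags the typeInfo as a generic type variable - see _typeInfo.h.)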
return (typeInfo(TI_REF, clsHnd, true)); } else { return (typeInfo(TI_REF, clsHnd)); } } /******************************************************************************/ bool Compiler::verIsSDArray(const typeInfo& ti) { if (ti.IsNullObjRef()) { // nulls are SD arrays return true; } if (!ti.IsType(TI_REF)) { return false; } if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef())) { return false; } return true; } /******************************************************************************/ /* Given 'arrayObjectType' which is an array type, fetch the element type. */ /* Returns an error type if anything goes wrong */ typeInfo Compiler::verGetArrayElemType(const typeInfo& arrayObjectType) { assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case if (!verIsSDArray(arrayObjectType)) { return typeInfo(); } CORINFO_CLASS_HANDLE childClassHandle = nullptr; CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle); return verMakeTypeInfo(ciType, childClassHandle); } /***************************************************************************** */ typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args) { CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle)); var_types type = JITtype2varType(ciType); if (varTypeIsGC(type)) { // For efficiency, getArgType only returns something in classHandle for // value types. For other types that have addition type info, you // have to call back explicitly classHandle = info.compCompHnd->getArgClass(sig, args); if (!classHandle) { NO_WAY("Could not figure out Class specified in argument or local signature"); } } return verMakeTypeInfo(ciType, classHandle); } bool Compiler::verIsByRefLike(const typeInfo& ti) { if (ti.IsByRef()) { return true; } if (!ti.IsType(TI_STRUCT)) { return false; } return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE; } bool Compiler::verIsSafeToReturnByRef(const typeInfo& ti) { if (ti.IsPermanentHomeByRef()) { return true; } else { return false; } } bool Compiler::verIsBoxable(const typeInfo& ti) { return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables || ti.IsUnboxedGenericTypeVar() || (ti.IsType(TI_STRUCT) && // exclude byreflike structs !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_BYREF_LIKE))); } // Is it a boxed value type? bool Compiler::verIsBoxedValueType(const typeInfo& ti) { if (ti.GetType() == TI_REF) { CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef(); return !!eeIsValueClass(clsHnd); } else { return false; } } /***************************************************************************** * * Check if a TailCall is legal. */ bool Compiler::verCheckTailCallConstraint( OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter? bool speculative // If true, won't throw if verificatoin fails. Instead it will // return false to the caller. // If false, it will throw. 
) { DWORD mflags; CORINFO_SIG_INFO sig; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped CORINFO_METHOD_HANDLE methodHnd = nullptr; CORINFO_CLASS_HANDLE methodClassHnd = nullptr; unsigned methodClassFlgs = 0; assert(impOpcodeIsCallOpcode(opcode)); if (compIsForInlining()) { return false; } // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { /* Get the call sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; } else { methodHnd = pResolvedToken->hMethod; mflags = info.compCompHnd->getMethodAttribs(methodHnd); // When verifying generic code we pair the method handle with its // owning class to get the exact method signature. methodClassHnd = pResolvedToken->hClass; assert(methodClassHnd); eeGetMethodSig(methodHnd, &sig, methodClassHnd); // opcode specific check methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd); } // We must have got the methodClassHnd if opcode is not CEE_CALLI assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI); if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } // check compatibility of the arguments unsigned int argCount; argCount = sig.numArgs; CORINFO_ARG_LIST_HANDLE args; args = sig.args; while (argCount--) { typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack(); // check that the argument is not a byref for tailcalls VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative); // For unsafe code, we might have parameters containing pointer to the stack location. // Disallow the tailcall for this kind. CORINFO_CLASS_HANDLE classHandle; CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle)); VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative); args = info.compCompHnd->getArgNext(args); } // update popCount popCount += sig.numArgs; // check for 'this' which is on non-static methods, not called via NEWOBJ if (!(mflags & CORINFO_FLG_STATIC)) { // Always update the popCount. // This is crucial for the stack calculation to be correct. typeInfo tiThis = impStackTop(popCount).seTypeInfo; popCount++; if (opcode == CEE_CALLI) { // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object // on the stack. 
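// A value type 'this' is really passed as a managed pointer to the value, so model it as a
// byref before the byref-like check below (which then rejects the tailcall).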
if (tiThis.IsValueClass()) { tiThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative); } else { // Check type compatibility of the this argument typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); } VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative); } } // Tail calls on constrained calls should be illegal too: // when instantiated at a value type, a constrained call may pass the address of a stack allocated value VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative); // Get the exact view of the signature for an array method if (sig.retType != CORINFO_TYPE_VOID) { if (methodClassFlgs & CORINFO_FLG_ARRAY) { assert(opcode != CEE_CALLI); eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig); } } typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass); typeInfo tiCallerRetType = verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass); // void return type gets morphed into the error type, so we have to treat them specially here if (sig.retType == CORINFO_TYPE_VOID) { VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch", speculative); } else { VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType), NormaliseForStack(tiCallerRetType), true), "tailcall return mismatch", speculative); } // for tailcall, stack must be empty VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative); return true; // Yes, tailcall is legal } /***************************************************************************** * * Checks the IL verification rules for the call */ void Compiler::verVerifyCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, bool tailCall, bool readonlyCall, const BYTE* delegateCreateStart, const BYTE* codeAddr, CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName)) { DWORD mflags; CORINFO_SIG_INFO* sig = nullptr; unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so // this counter is used to keep track of how many items have been // virtually popped // for calli, VerifyOrReturn that this is not a virtual method if (opcode == CEE_CALLI) { Verify(false, "Calli not verifiable"); return; } //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item. 
mflags = callInfo->verMethodFlags; sig = &callInfo->verSig; if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } // opcode specific check unsigned methodClassFlgs = callInfo->classFlags; switch (opcode) { case CEE_CALLVIRT: // cannot do callvirt on valuetypes VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class"); VerifyOrReturn(sig->hasThis(), "CallVirt on static method"); break; case CEE_NEWOBJ: { assert(!tailCall); // Importer should not allow this VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC), "newobj must be on instance"); if (methodClassFlgs & CORINFO_FLG_DELEGATE) { VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor"); typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack(); typeInfo tiDeclaredFtn = verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack(); VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type"); assert(popCount == 0); typeInfo tiActualObj = impStackTop(1).seTypeInfo; typeInfo tiActualFtn = impStackTop(0).seTypeInfo; VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg"); VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch"); VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF), "delegate object type mismatch"); CORINFO_CLASS_HANDLE objTypeHandle = tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef(); // the method signature must be compatible with the delegate's invoke method // check that for virtual functions, the type of the object used to get the // ftn ptr is the same as the type of the object passed to the delegate ctor. // since this is a bit of work to determine in general, we pattern match stylized // code sequences // the delegate creation code check, which used to be done later, is now done here // so we can read delegateMethodRef directly from // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence; // we then use it in our call to isCompatibleDelegate(). 
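// Expected IL shapes being matched (illustrative):
//   ldftn <target>            ; newobj instance void SomeDelegate::.ctor(object, native int)
//   dup ; ldvirtftn <target>  ; newobj instance void SomeDelegate::.ctor(object, native int)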
mdMemberRef delegateMethodRef = mdMemberRefNil; VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef), "must create delegates with certain IL"); CORINFO_RESOLVED_TOKEN delegateResolvedToken; delegateResolvedToken.tokenContext = impTokenLookupContextHandle; delegateResolvedToken.tokenScope = info.compScopeHnd; delegateResolvedToken.token = delegateMethodRef; delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method; info.compCompHnd->resolveToken(&delegateResolvedToken); CORINFO_CALL_INFO delegateCallInfo; eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */, CORINFO_CALLINFO_SECURITYCHECKS, &delegateCallInfo); bool isOpenDelegate = false; VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass, tiActualFtn.GetMethod(), pResolvedToken->hClass, &isOpenDelegate), "function incompatible with delegate"); // check the constraints on the target method VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass), "delegate target has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass, tiActualFtn.GetMethod()), "delegate target has unsatisfied method constraints"); // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch) // for additional verification rules for delegates CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod(); DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle); if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiActualObj), "The 'this' parameter to the call must be either the calling method's " "'this' parameter or " "a boxed value type."); } } if (actualMethodAttribs & CORINFO_FLG_PROTECTED) { bool targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC; Verify(targetIsStatic || !isOpenDelegate, "Unverifiable creation of an open instance delegate for a protected member."); CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic) ? info.compClassHnd : tiActualObj.GetClassHandleForObjRef(); // In the case of protected methods, it is a requirement that the 'this' // pointer be a subclass of the current context. Perform this check. 
Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd), "Accessing protected method through wrong type."); } goto DONE_ARGS; } } // fall thru to default checks FALLTHROUGH; default: VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract"); } VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)), "can only newobj a delegate constructor"); // check compatibility of the arguments unsigned int argCount; argCount = sig->numArgs; CORINFO_ARG_LIST_HANDLE args; args = sig->args; while (argCount--) { typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo; typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack(); VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch"); args = info.compCompHnd->getArgNext(args); } DONE_ARGS: // update popCount popCount += sig->numArgs; // check for 'this' which is on non-static methods, not called via NEWOBJ CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd; if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ)) { typeInfo tiThis = impStackTop(popCount).seTypeInfo; popCount++; // If it is null, we assume we can access it (since it will AV shortly) // If it is anything but a reference class, there is no hierarchy, so // again, we don't need the precise instance class to compute 'protected' access if (tiThis.IsType(TI_REF)) { instanceClassHnd = tiThis.GetClassHandleForObjRef(); } // Check type compatibility of the this argument typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); } // If this is a call to the base class .ctor, set thisPtr Init for // this block. if (mflags & CORINFO_FLG_CONSTRUCTOR) { if (verTrackObjCtorInitState && tiThis.IsThisPtr() && verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass)) { assert(verCurrentState.thisInitialized != TIS_Bottom); // This should never be the case just from the logic of the verifier. VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit, "Call to base class constructor when 'this' is possibly initialized"); // Otherwise, 'this' is now initialized. verCurrentState.thisInitialized = TIS_Init; tiThis.SetInitialisedObjRef(); } else { // We allow direct calls to value type constructors // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a // constrained callvirt to illegally re-enter a .ctor on a value of reference type. 
VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(), "Bad call to a constructor"); } } if (pConstrainedResolvedToken != nullptr) { VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call"); typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass); // We just dereference this and test for equality tiThis.DereferenceByRef(); VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint), "this type mismatch with constrained type operand"); // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass); } // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef()) { tiDeclaredThis.SetIsReadonlyByRef(); } VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch"); if (tiThis.IsByRef()) { // Find the actual type where the method exists (as opposed to what is declared // in the metadata). This is to prevent passing a byref as the "this" argument // while calling methods like System.ValueType.GetHashCode() which expect boxed objects. CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod); VerifyOrReturn(eeIsValueClass(actualClassHnd), "Call to base type of valuetype (which is never a valuetype)"); } // Rules for non-virtual call to a non-final virtual method: // Define: // The "this" pointer is considered to be "possibly written" if // 1. Its address has been taken (LDARGA 0) anywhere in the method. // (or) // 2. It has been stored to (STARG.0) anywhere in the method. // A non-virtual call to a non-final virtual method is only allowed if // 1. The this pointer passed to the callee is an instance of a boxed value type. // (or) // 2. The this pointer passed to the callee is the current method's this pointer. // (and) The current method's this pointer is not "possibly written". // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to // virtual methods. (Luckily this does not affect .ctors, since they are not virtual). // This is stronger than is strictly needed, but implementing a laxer rule is significantly // harder and more error prone. if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)) { VerifyOrReturn((tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly()) || verIsBoxedValueType(tiThis), "The 'this' parameter to the call must be either the calling method's 'this' parameter or " "a boxed value type."); } } // check any constraints on the callee's class and type parameters VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass), "method has unsatisfied class constraints"); VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod), "method has unsatisfied method constraints"); if (mflags & CORINFO_FLG_PROTECTED) { VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd), "Can't access protected method"); } // Get the exact view of the signature for an array method if (sig->retType != CORINFO_TYPE_VOID) { eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass); } // "readonly." prefixed calls are only allowed for the Address operation on arrays. // The methods supported by array types are under the control of the EE // so we can trust that only the Address operation returns a byref. 
if (readonlyCall) { typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass); VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(), "unexpected use of readonly prefix"); } // Verify the tailcall if (tailCall) { verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false); } } /***************************************************************************** * Checks that a delegate creation is done using the following pattern: * dup * ldvirtftn targetMemberRef * OR * ldftn targetMemberRef * * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if * not in this basic block) * * targetMemberRef is read from the code sequence. * targetMemberRef is validated iff verificationNeeded. */ bool Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart, const BYTE* codeAddr, mdMemberRef& targetMemberRef) { if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]); return true; } else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr)) { targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]); return true; } return false; } typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType) { Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref"); typeInfo ptrVal = verVerifyLDIND(tiTo, instrType); typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack(); if (!tiCompatibleWith(value, normPtrVal, true)) { Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch"); } return ptrVal; } typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType) { assert(!instrType.IsStruct()); typeInfo ptrVal; if (ptr.IsByRef()) { ptrVal = DereferenceByRef(ptr); if (instrType.IsObjRef() && !ptrVal.IsObjRef()) { Verify(false, "bad pointer"); } else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal)) { Verify(false, "pointer not consistent with instr"); } } else { Verify(false, "pointer not byref"); } return ptrVal; } // Verify that the field is used properly. 'tiThis' is NULL for statics, // 'fieldFlags' is the fields attributes, and mutator is true if it is a // ld*flda or a st*fld. // 'enclosingClass' is given if we are accessing a field in some specific type. void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken, const CORINFO_FIELD_INFO& fieldInfo, const typeInfo* tiThis, bool mutator, bool allowPlainStructAsThis) { CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass; unsigned fieldFlags = fieldInfo.fieldFlags; CORINFO_CLASS_HANDLE instanceClass = info.compClassHnd; // for statics, we imagine the instance is the current class. 
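// 'instanceClass' is refined below to the class of the 'this' object (when there is one), so that
// 'protected' (family) accessibility can be checked against the correct type.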
bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0); if (mutator) { Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static"); if ((fieldFlags & CORINFO_FLG_FIELD_FINAL)) { Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd && info.compIsStatic == isStaticField, "bad use of initonly field (set or address taken)"); } } if (tiThis == nullptr) { Verify(isStaticField, "used static opcode with non-static field"); } else { typeInfo tThis = *tiThis; if (allowPlainStructAsThis && tThis.IsValueClass()) { tThis.MakeByRef(); } // If it is null, we assume we can access it (since it will AV shortly) // If it is anything but a reference class, there is no hierarchy, so // again, we don't need the precise instance class to compute 'protected' access if (tiThis->IsType(TI_REF)) { instanceClass = tiThis->GetClassHandleForObjRef(); } // Note that even if the field is static, we require that the this pointer // satisfy the same constraints as a non-static field. This happens to // be simpler and seems reasonable. typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass); if (tiDeclaredThis.IsValueClass()) { tiDeclaredThis.MakeByRef(); // we allow read-only tThis, on any field access (even stores!), because if the // class implementor wants to prohibit stores he should make the field private. // we do this by setting the read-only bit on the type we compare tThis to. tiDeclaredThis.SetIsReadonlyByRef(); } else if (verTrackObjCtorInitState && tThis.IsThisPtr()) { // Any field access is legal on "uninitialized" this pointers. // The easiest way to implement this is to simply set the // initialized bit for the duration of the type check on the // field access only. It does not change the state of the "this" // for the function as a whole. Note that the "tThis" is a copy // of the original "this" type (*tiThis) passed in. tThis.SetInitialisedObjRef(); } Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch"); } // Presently the JIT does not check that we don't store or take the address of init-only fields // since we cannot guarantee their immutability and it is not a security issue. // check any constraints on the field's class --- accessing the field might cause a class constructor to run. VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass), "field has unsatisfied class constraints"); if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED) { Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass), "Accessing protected field through wrong type."); } } void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode) { if (tiOp1.IsNumberType()) { #ifdef TARGET_64BIT Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch"); #else // TARGET_64BIT // [10/17/2013] Consider changing this: to put on my verification lawyer hat, // this is non-conforming to the ECMA Spec: types don't have to be equivalent, // but compatible, since we can coalesce native int with int32 (see section III.1.5).
Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch"); #endif // !TARGET_64BIT } else if (tiOp1.IsObjRef()) { switch (opcode) { case CEE_BEQ_S: case CEE_BEQ: case CEE_BNE_UN_S: case CEE_BNE_UN: case CEE_CEQ: case CEE_CGT_UN: break; default: Verify(false, "Cond not allowed on object types"); } Verify(tiOp2.IsObjRef(), "Cond type mismatch"); } else if (tiOp1.IsByRef()) { Verify(tiOp2.IsByRef(), "Cond type mismatch"); } else { Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch"); } } void Compiler::verVerifyThisPtrInitialised() { if (verTrackObjCtorInitState) { Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized"); } } bool Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target) { // Either target == context, in this case calling an alternate .ctor // Or target is the immediate parent of context return ((target == context) || (target == info.compCompHnd->getParentType(context))); } GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } // CoreRT generic virtual method if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* runtimeMethodHandle = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_METHOD_HDL, pCallInfo->hMethod); return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL, gtNewCallArgs(thisPtr, runtimeMethodHandle)); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { if (!pCallInfo->exactContextNeedsRuntimeLookup) { GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewCallArgs(thisPtr)); call->setEntryPoint(pCallInfo->codePointerLookup.constLookup); return call; } // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too. if (IsTargetAbi(CORINFO_CORERT_ABI)) { GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind); return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL, gtNewCallArgs(ctxTree), &pCallInfo->codePointerLookup.lookupKind); } } #endif // Get the exact descriptor for the static callsite GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken); if (exactTypeDesc == nullptr) { // compDonotInline() return nullptr; } GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken); if (exactMethodDesc == nullptr) { // compDonotInline() return nullptr; } GenTreeCall::Use* helpArgs = gtNewCallArgs(exactMethodDesc); helpArgs = gtPrependNewCallArg(exactTypeDesc, helpArgs); helpArgs = gtPrependNewCallArg(thisPtr, helpArgs); // Call helper function. This gets the target address of the final destination callsite. return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs); } //------------------------------------------------------------------------ // impBoxPatternMatch: match and import common box idioms // // Arguments: // pResolvedToken - resolved token from the box operation // codeAddr - position in IL stream after the box instruction // codeEndp - end of IL stream // // Return Value: // Number of IL bytes matched and imported, -1 otherwise // // Notes: // pResolvedToken is known to be a value type; ref type boxing // is handled in the CEE_BOX clause. 
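//    The IL shapes recognized here are, roughly:
//      box T; unbox.any T                -> folded to a no-op
//      box T; brtrue/brfalse             -> folded to a constant (plus a null check when one is needed)
//      box T; isinst T2; brtrue/brfalse  -> folded to a constant (or a Nullable 'hasValue' load) when the cast result is statically known
//      box T; isinst T2; unbox.any T3    -> folded to a no-op when the tokens all refer to the same type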
int Compiler::impBoxPatternMatch(CORINFO_RESOLVED_TOKEN* pResolvedToken, const BYTE* codeAddr, const BYTE* codeEndp, bool makeInlineObservation) { if (codeAddr >= codeEndp) { return -1; } switch (codeAddr[0]) { case CEE_UNBOX_ANY: // box + unbox.any if (codeAddr + 1 + sizeof(mdToken) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } CORINFO_RESOLVED_TOKEN unboxResolvedToken; impResolveToken(codeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // See if the resolved tokens describe types that are equal. const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass); // If so, box/unbox.any is a nop. if (compare == TypeCompareState::Must) { JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n"); // Skip the next unbox.any instruction return 1 + sizeof(mdToken); } } break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: // box + br_true/false if ((codeAddr + ((codeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 0; } GenTree* const treeToBox = impStackTop().val; bool canOptimize = true; GenTree* treeToNullcheck = nullptr; // Can the thing being boxed cause a side effect? if ((treeToBox->gtFlags & GTF_SIDE_EFFECT) != 0) { // Is this a side effect we can replicate cheaply? if (((treeToBox->gtFlags & GTF_SIDE_EFFECT) == GTF_EXCEPT) && treeToBox->OperIs(GT_OBJ, GT_BLK, GT_IND)) { // Yes, we just need to perform a null check if needed. GenTree* const addr = treeToBox->AsOp()->gtGetOp1(); if (fgAddrCouldBeNull(addr)) { treeToNullcheck = addr; } } else { canOptimize = false; } } if (canOptimize) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { JITDUMP("\n Importing BOX; BR_TRUE/FALSE as %sconstant\n", treeToNullcheck == nullptr ? "" : "nullcheck+"); impPopStack(); GenTree* result = gtNewIconNode(1); if (treeToNullcheck != nullptr) { GenTree* nullcheck = gtNewNullCheck(treeToNullcheck, compCurBB); result = gtNewOperNode(GT_COMMA, TYP_INT, nullcheck, result); } impPushOnStack(result, typeInfo(TI_INT)); return 0; } } } break; case CEE_ISINST: if (codeAddr + 1 + sizeof(mdToken) + 1 <= codeEndp) { const BYTE* nextCodeAddr = codeAddr + 1 + sizeof(mdToken); switch (nextCodeAddr[0]) { // box + isinst + br_true/false case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: if ((nextCodeAddr + ((nextCodeAddr[0] >= CEE_BRFALSE) ? 5 : 2)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 1 + sizeof(mdToken); } if (!(impStackTop().val->gtFlags & GTF_SIDE_EFFECT)) { CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); if (boxHelper == CORINFO_HELP_BOX) { CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(pResolvedToken->hClass, isInstResolvedToken.hClass); if (castResult != TypeCompareState::May) { JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant\n"); impPopStack(); impPushOnStack(gtNewIconNode((castResult == TypeCompareState::Must) ? 
1 : 0), typeInfo(TI_INT)); // Skip the next isinst instruction return 1 + sizeof(mdToken); } } else if (boxHelper == CORINFO_HELP_BOX_NULLABLE) { // For nullable we're going to fold it to "ldfld hasValue + brtrue/brfalse" or // "ldc.i4.0 + brtrue/brfalse" in case if the underlying type is not castable to // the target type. CORINFO_RESOLVED_TOKEN isInstResolvedToken; impResolveToken(codeAddr + 1, &isInstResolvedToken, CORINFO_TOKENKIND_Casting); CORINFO_CLASS_HANDLE nullableCls = pResolvedToken->hClass; CORINFO_CLASS_HANDLE underlyingCls = info.compCompHnd->getTypeForBox(nullableCls); TypeCompareState castResult = info.compCompHnd->compareTypesForCast(underlyingCls, isInstResolvedToken.hClass); if (castResult == TypeCompareState::Must) { const CORINFO_FIELD_HANDLE hasValueFldHnd = info.compCompHnd->getFieldInClass(nullableCls, 0); assert(info.compCompHnd->getFieldOffset(hasValueFldHnd) == 0); assert(!strcmp(info.compCompHnd->getFieldName(hasValueFldHnd, nullptr), "hasValue")); GenTree* objToBox = impPopStack().val; // Spill struct to get its address (to access hasValue field) objToBox = impGetStructAddr(objToBox, nullableCls, (unsigned)CHECK_SPILL_ALL, true); impPushOnStack(gtNewFieldRef(TYP_BOOL, hasValueFldHnd, objToBox, 0), typeInfo(TI_INT)); JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as nullableVT.hasValue\n"); return 1 + sizeof(mdToken); } else if (castResult == TypeCompareState::MustNot) { impPopStack(); impPushOnStack(gtNewIconNode(0), typeInfo(TI_INT)); JITDUMP("\n Importing BOX; ISINST; BR_TRUE/FALSE as constant (false)\n"); return 1 + sizeof(mdToken); } } } } break; // box + isinst + unbox.any case CEE_UNBOX_ANY: if ((nextCodeAddr + 1 + sizeof(mdToken)) <= codeEndp) { if (makeInlineObservation) { compInlineResult->Note(InlineObservation::CALLEE_FOLDABLE_BOX); return 2 + sizeof(mdToken) * 2; } // See if the resolved tokens in box, isinst and unbox.any describe types that are equal. CORINFO_RESOLVED_TOKEN isinstResolvedToken = {}; impResolveToken(codeAddr + 1, &isinstResolvedToken, CORINFO_TOKENKIND_Class); if (info.compCompHnd->compareTypesForEquality(isinstResolvedToken.hClass, pResolvedToken->hClass) == TypeCompareState::Must) { CORINFO_RESOLVED_TOKEN unboxResolvedToken = {}; impResolveToken(nextCodeAddr + 1, &unboxResolvedToken, CORINFO_TOKENKIND_Class); // If so, box + isinst + unbox.any is a nop. if (info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, pResolvedToken->hClass) == TypeCompareState::Must) { JITDUMP("\n Importing BOX; ISINST, UNBOX.ANY as NOP\n"); return 2 + sizeof(mdToken) * 2; } } } break; } } break; default: break; } return -1; } //------------------------------------------------------------------------ // impImportAndPushBox: build and import a value-type box // // Arguments: // pResolvedToken - resolved token from the box operation // // Return Value: // None. // // Side Effects: // The value to be boxed is popped from the stack, and a tree for // the boxed value is pushed. This method may create upstream // statements, spill side effecting trees, and create new temps. // // If importing an inlinee, we may also discover the inline must // fail. If so there is no new value pushed on the stack. Callers // should use CompDoNotInline after calling this method to see if // ongoing importation should be aborted. // // Notes: // Boxing of ref classes results in the same value as the value on // the top of the stack, so is handled inline in impImportBlockCode // for the CEE_BOX case. Only value or primitive type boxes make it // here. 
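//   (E.g. 'box System.Int32' or a box of a user-defined struct is imported here; a box of a
//   reference type never reaches this method.)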
// // Boxing for nullable types is done via a helper call; boxing // of other value types is expanded inline or handled via helper // call, depending on the jit's codegen mode. // // When the jit is operating in size and time constrained modes, // using a helper call here can save jit time and code size. But it // also may inhibit cleanup optimizations that could have had an // even greater effect on code size and jit time. An optimal // strategy may need to peek ahead and see if it is easy to tell how // the box is being used. For now, we defer. void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken) { // Spill any special side effects impSpillSpecialSideEff(); // Get the expression to box from the stack. GenTree* op1 = nullptr; GenTree* op2 = nullptr; StackEntry se = impPopStack(); CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle(); GenTree* exprToBox = se.val; // Look at what helper we should use. CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass); // Determine what expansion to prefer. // // In size/time/debuggable constrained modes, the helper call // expansion for box is generally smaller and is preferred, unless // the value to box is a struct that comes from a call. In that // case the call can construct its return value directly into the // box payload, saving possibly some up-front zeroing. // // Currently primitive type boxes always get inline expanded. We may // want to do the same for small structs if they don't come from // calls and don't have GC pointers, since explicitly copying such // structs is cheap. JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via"); bool canExpandInline = (boxHelper == CORINFO_HELP_BOX); bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && opts.OptimizationDisabled(); bool expandInline = canExpandInline && !optForSize; if (expandInline) { JITDUMP(" inline allocate/copy sequence\n"); // we are doing 'normal' boxing. This means that we can inline the box operation // Box(expr) gets morphed into // temp = new(clsHnd) // cpobj(temp+4, expr, clsHnd) // push temp // The code paths differ slightly below for structs and primitives because // "cpobj" differs in these cases. In one case you get // impAssignStructPtr(temp+4, expr, clsHnd) // and the other you get // *(temp+4) = expr if (opts.OptimizationDisabled()) { // For minopts/debug code, try and minimize the total number // of box temps by reusing an existing temp when possible. if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM) { impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper")); } } else { // When optimizing, use a new temp for each box operation // since we then know the exact class of the box temp. impBoxTemp = lvaGrabTemp(true DEBUGARG("Single-def Box Helper")); lvaTable[impBoxTemp].lvType = TYP_REF; lvaTable[impBoxTemp].lvSingleDef = 1; JITDUMP("Marking V%02u as a single def local\n", impBoxTemp); const bool isExact = true; lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact); } // The box temp needs to stay in use until this box expression is appended to // some other node. We approximate this by keeping it alive until // the opcode stack becomes empty. impBoxTempInUse = true; // Remember the current last statement in case we need to move // a range of statements to ensure the box temp is initialized // before it's used.
// Statement* const cursor = impLastStmt; const bool useParent = false; op1 = gtNewAllocObjNode(pResolvedToken, useParent); if (op1 == nullptr) { // If we fail to create the newobj node, we must be inlining // and have run across a type we can't describe. // assert(compDonotInline()); return; } // Remember that this basic block contains 'new' of an object, // and so does this method // compCurBB->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Assign the boxed object to the box temp. // GenTree* asg = gtNewTempAssign(impBoxTemp, op1); Statement* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // If the exprToBox is a call that returns its value via a ret buf arg, // move the assignment statement(s) before the call (which must be a top level tree). // // We do this because impAssignStructPtr (invoked below) will // back-substitute into a call when it sees a GT_RET_EXPR and the call // has a hidden buffer pointer, So we need to reorder things to avoid // creating out-of-sequence IR. // if (varTypeIsStruct(exprToBox) && exprToBox->OperIs(GT_RET_EXPR)) { GenTreeCall* const call = exprToBox->AsRetExpr()->gtInlineCandidate->AsCall(); if (call->HasRetBufArg()) { JITDUMP("Must insert newobj stmts for box before call [%06u]\n", dspTreeID(call)); // Walk back through the statements in this block, looking for the one // that has this call as the root node. // // Because gtNewTempAssign (above) may have added statements that // feed into the actual assignment we need to move this set of added // statements as a group. // // Note boxed allocations are side-effect free (no com or finalizer) so // our only worries here are (correctness) not overlapping the box temp // lifetime and (perf) stretching the temp lifetime across the inlinee // body. // // Since this is an inline candidate, we must be optimizing, and so we have // a unique box temp per call. So no worries about overlap. // assert(!opts.OptimizationDisabled()); // Lifetime stretching could addressed with some extra cleverness--sinking // the allocation back down to just before the copy, once we figure out // where the copy is. We defer for now. // Statement* insertBeforeStmt = cursor; noway_assert(insertBeforeStmt != nullptr); while (true) { if (insertBeforeStmt->GetRootNode() == call) { break; } // If we've searched all the statements in the block and failed to // find the call, then something's wrong. // noway_assert(insertBeforeStmt != impStmtList); insertBeforeStmt = insertBeforeStmt->GetPrevStmt(); } // Found the call. Move the statements comprising the assignment. // JITDUMP("Moving " FMT_STMT "..." FMT_STMT " before " FMT_STMT "\n", cursor->GetNextStmt()->GetID(), asgStmt->GetID(), insertBeforeStmt->GetID()); assert(asgStmt == impLastStmt); do { Statement* movingStmt = impExtractLastStmt(); impInsertStmtBefore(movingStmt, insertBeforeStmt); insertBeforeStmt = movingStmt; } while (impLastStmt != cursor); } } // Create a pointer to the box payload in op1. // op1 = gtNewLclvNode(impBoxTemp, TYP_REF); op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2); // Copy from the exprToBox to the box payload. 
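// For a primitive the copy below has roughly the shape
//   ASG(IND<type>(ADD(boxTemp, TARGET_POINTER_SIZE)), exprToBox)
// while structs go through impAssignStructPtr on the same destination address.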
// if (varTypeIsStruct(exprToBox)) { assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls)); op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL); } else { var_types lclTyp = exprToBox->TypeGet(); if (lclTyp == TYP_BYREF) { lclTyp = TYP_I_IMPL; } CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass); if (impIsPrimitive(jitType)) { lclTyp = JITtype2varType(jitType); } var_types srcTyp = exprToBox->TypeGet(); var_types dstTyp = lclTyp; // We allow float <-> double mismatches and implicit truncation for small types. assert((genActualType(srcTyp) == genActualType(dstTyp)) || (varTypeIsFloating(srcTyp) == varTypeIsFloating(dstTyp))); // Note regarding small types. // We are going to store to the box here via an indirection, so the cast added below is // redundant, since the store has an implicit truncation semantic. The reason we still // add this cast is so that the code which deals with GT_BOX optimizations does not have // to account for this implicit truncation (e. g. understand that BOX<byte>(0xFF + 1) is // actually BOX<byte>(0) or deal with signedness mismatch and other GT_CAST complexities). if (srcTyp != dstTyp) { exprToBox = gtNewCastNode(genActualType(dstTyp), exprToBox, false, dstTyp); } op1 = gtNewAssignNode(gtNewOperNode(GT_IND, dstTyp, op1), exprToBox); } // Spill eval stack to flush out any pending side effects. impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox")); // Set up this copy as a second assignment. Statement* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); op1 = gtNewLclvNode(impBoxTemp, TYP_REF); // Record that this is a "box" node and keep track of the matching parts. op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt); // If it is a value class, mark the "box" node. We can use this information // to optimise several cases: // "box(x) == null" --> false // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod" // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod" op1->gtFlags |= GTF_BOX_VALUE; assert(op1->IsBoxedValue()); assert(asg->gtOper == GT_ASG); } else { // Don't optimize, just call the helper and be done with it. JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable"); assert(operCls != nullptr); // Ensure that the value class is restored op2 = impTokenToHandle(pResolvedToken, nullptr, true /* mustRestoreHandle */); if (op2 == nullptr) { // We must be backing out of an inline. assert(compDonotInline()); return; } GenTreeCall::Use* args = gtNewCallArgs(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true)); op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args); } /* Push the result back on the stack, */ /* even if clsHnd is a value class we want the TI_REF */ typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass)); impPushOnStack(op1, tiRetVal); } //------------------------------------------------------------------------ // impImportNewObjArray: Build and import `new` of multi-dimmensional array // // Arguments: // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized // by a call to CEEInfo::resolveToken(). // pCallInfo - The CORINFO_CALL_INFO that has been initialized // by a call to CEEInfo::getCallInfo(). // // Assumptions: // The multi-dimensional array constructor arguments (array dimensions) are // pushed on the IL stack on entry to this method. 
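//    (E.g. for C# 'new int[2,3]' two int32 dimension values are on the stack and the rank is 2.)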
// // Notes: // Multi-dimensional array constructors are imported as calls to a JIT // helper, not as regular calls. void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo) { GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken); if (classHandle == nullptr) { // compDonotInline() return; } assert(pCallInfo->sig.numArgs); GenTree* node; // Reuse the temp used to pass the array dimensions to avoid bloating // the stack frame in case there are multiple calls to multi-dim array // constructors within a single method. if (lvaNewObjArrayArgs == BAD_VAR_NUM) { lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs")); lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK; lvaTable[lvaNewObjArrayArgs].lvExactSize = 0; } // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers // for our call to CORINFO_HELP_NEW_MDARR. lvaTable[lvaNewObjArrayArgs].lvExactSize = max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32)); // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments // to one allocation at a time. impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray")); // // The arguments of the CORINFO_HELP_NEW_MDARR helper are: // - Array class handle // - Number of dimension arguments // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp. // node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK); node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node); // Pop dimension arguments from the stack one at a time and store it // into lvaNewObjArrayArgs temp. for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--) { GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT); GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK); dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest); dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest, new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i)); dest = gtNewOperNode(GT_IND, TYP_INT, dest); node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node); } GenTreeCall::Use* args = gtNewCallArgs(node); // pass number of arguments to the helper args = gtPrependNewCallArg(gtNewIconNode(pCallInfo->sig.numArgs), args); args = gtPrependNewCallArg(classHandle, args); node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args); for (GenTreeCall::Use& use : node->AsCall()->Args()) { node->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } node->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass; // Remember that this basic block contains 'new' of a md array compCurBB->bbFlags |= BBF_HAS_NEWARRAY; impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass)); } GenTree* Compiler::impTransformThis(GenTree* thisPtr, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, CORINFO_THIS_TRANSFORM transform) { switch (transform) { case CORINFO_DEREF_THIS: { GenTree* obj = thisPtr; // This does a LDIND on the obj, which should be a byref. 
pointing to a ref impBashVarAddrsToI(obj); assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF); CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass); obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj); // ldind could point anywhere, example a boxed class static int obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); return obj; } case CORINFO_BOX_THIS: { // Constraint calls where there might be no // unboxed entry point require us to implement the call via helper. // These only occur when a possible target of the call // may have inherited an implementation of an interface // method from System.Object or System.ValueType. The EE does not provide us with // "unboxed" versions of these methods. GenTree* obj = thisPtr; assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL); obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj); obj->gtFlags |= GTF_EXCEPT; CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass); if (impIsPrimitive(jitTyp)) { if (obj->OperIsBlk()) { obj->ChangeOperUnchecked(GT_IND); // Obj could point anywhere, example a boxed class static int obj->gtFlags |= GTF_IND_TGTANYWHERE; obj->AsOp()->gtOp2 = nullptr; // must be zero for tree walkers } obj->gtType = JITtype2varType(jitTyp); assert(varTypeIsArithmetic(obj->gtType)); } // This pushes on the dereferenced byref // This is then used immediately to box. impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack()); // This pops off the byref-to-a-value-type remaining on the stack and // replaces it with a boxed object. // This is then used as the object to the virtual call immediately below. impImportAndPushBox(pConstrainedResolvedToken); if (compDonotInline()) { return nullptr; } obj = impPopStack().val; return obj; } case CORINFO_NO_THIS_TRANSFORM: default: return thisPtr; } } //------------------------------------------------------------------------ // impCanPInvokeInline: check whether PInvoke inlining should enabled in current method. // // Return Value: // true if PInvoke inlining should be enabled in current method, false otherwise // // Notes: // Checks a number of ambient conditions where we could pinvoke but choose not to bool Compiler::impCanPInvokeInline() { return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke ; } //------------------------------------------------------------------------ // impCanPInvokeInlineCallSite: basic legality checks using information // from a call to see if the call qualifies as an inline pinvoke. // // Arguments: // block - block contaning the call, or for inlinees, block // containing the call being inlined // // Return Value: // true if this call can legally qualify as an inline pinvoke, false otherwise // // Notes: // For runtimes that support exception handling interop there are // restrictions on using inline pinvoke in handler regions. // // * We have to disable pinvoke inlining inside of filters because // in case the main execution (i.e. 
in the try block) is inside // unmanaged code, we cannot reuse the inlined stub (we still need // the original state until we are in the catch handler) // // * We disable pinvoke inlining inside handlers since the GSCookie // is in the inlined Frame (see // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but // this would not protect framelets/return-address of handlers. // // These restrictions are currently also in place for CoreCLR but // can be relaxed when coreclr/#8459 is addressed. bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block) { if (block->hasHndIndex()) { return false; } // The remaining limitations do not apply to CoreRT if (IsTargetAbi(CORINFO_CORERT_ABI)) { return true; } #ifdef TARGET_64BIT // On 64-bit platforms, we disable pinvoke inlining inside of try regions. // Note that this could be needed on other architectures too, but we // haven't done enough investigation to know for sure at this point. // // Here is the comment from JIT64 explaining why: // [VSWhidbey: 611015] - because the jitted code links in the // Frame (instead of the stub) we rely on the Frame not being // 'active' until inside the stub. This normally happens by the // stub setting the return address pointer in the Frame object // inside the stub. On a normal return, the return address // pointer is zeroed out so the Frame can be safely re-used, but // if an exception occurs, nobody zeros out the return address // pointer. Thus if we re-used the Frame object, it would go // 'active' as soon as we link it into the Frame chain. // // Technically we only need to disable PInvoke inlining if we're // in a handler or if we're in a try body with a catch or // filter/except where other non-handler code in this method // might run and try to re-use the dirty Frame object. // // A desktop test case where this seems to matter is // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe if (block->hasTryIndex()) { // This does not apply to the raw pinvoke call that is inside the pinvoke // ILStub. In this case, we have to inline the raw pinvoke call into the stub, // otherwise we would end up with a stub that recursively calls itself, and end // up with a stack overflow. if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers()) { return true; } return false; } #endif // TARGET_64BIT return true; } //------------------------------------------------------------------------ // impCheckForPInvokeCall examine call to see if it is a pinvoke and if so // if it can be expressed as an inline pinvoke. // // Arguments: // call - tree for the call // methHnd - handle for the method being called (may be null) // sig - signature of the method being called // mflags - method flags for the method being called // block - block contaning the call, or for inlinees, block // containing the call being inlined // // Notes: // Sets GTF_CALL_M_PINVOKE on the call for pinvokes. // // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the // call passes a combination of legality and profitabilty checks. 
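//    (Legality is checked via impCanPInvokeInlineCallSite/impCanPInvokeInline; the profitability side
//    skips rarely executed blocks and calls that require marshaling.)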
// // If GTF_CALL_UNMANAGED is set, increments info.compUnmanagedCallCountWithGCTransition void Compiler::impCheckForPInvokeCall( GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block) { CorInfoCallConvExtension unmanagedCallConv; // If VM flagged it as Pinvoke, flag the call node accordingly if ((mflags & CORINFO_FLG_PINVOKE) != 0) { call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE; } bool suppressGCTransition = false; if (methHnd) { if ((mflags & CORINFO_FLG_PINVOKE) == 0) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd, nullptr, &suppressGCTransition); } else { if (sig->getCallConv() == CORINFO_CALLCONV_DEFAULT || sig->getCallConv() == CORINFO_CALLCONV_VARARG) { return; } unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(nullptr, sig, &suppressGCTransition); assert(!call->gtCallCookie); } if (suppressGCTransition) { call->gtCallMoreFlags |= GTF_CALL_M_SUPPRESS_GC_TRANSITION; } // If we can't get the unmanaged calling convention or the calling convention is unsupported in the JIT, // return here without inlining the native call. if (unmanagedCallConv == CorInfoCallConvExtension::Managed || unmanagedCallConv == CorInfoCallConvExtension::Fastcall || unmanagedCallConv == CorInfoCallConvExtension::FastcallMemberFunction) { return; } optNativeCallCount++; if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI))) { // PInvoke in CoreRT ABI must be always inlined. Non-inlineable CALLI cases have been // converted to regular method calls earlier using convertPInvokeCalliToCall. // PInvoke CALLI in IL stubs must be inlined } else { // Check legality if (!impCanPInvokeInlineCallSite(block)) { return; } // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive // inlining in CoreRT. Skip the ambient conditions checks and profitability checks. if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0) { if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && opts.ShouldUsePInvokeHelpers()) { // Raw PInvoke call in PInvoke IL stub generated must be inlined to avoid infinite // recursive calls to the stub. } else { if (!impCanPInvokeInline()) { return; } // Size-speed tradeoff: don't use inline pinvoke at rarely // executed call sites. The non-inline version is more // compact. if (block->isRunRarely()) { return; } } } // The expensive check should be last if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig)) { return; } } JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s\n", info.compFullName)); call->gtFlags |= GTF_CALL_UNMANAGED; call->unmgdCallConv = unmanagedCallConv; if (!call->IsSuppressGCTransition()) { info.compUnmanagedCallCountWithGCTransition++; } // AMD64 convention is same for native and managed if (unmanagedCallConv == CorInfoCallConvExtension::C || unmanagedCallConv == CorInfoCallConvExtension::CMemberFunction) { call->gtFlags |= GTF_CALL_POP_ARGS; } if (unmanagedCallConv == CorInfoCallConvExtension::Thiscall) { call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL; } } GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, const DebugInfo& di) { var_types callRetTyp = JITtype2varType(sig->retType); /* The function pointer is on top of the stack - It may be a * complex expression. As it is evaluated after the args, * it may cause registered args to be spilled. Simply spill it. */ // Ignore this trivial case. 
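// (A bare local variable read has no side effects and cannot be disturbed by evaluating the arguments,
// so it does not need to be spilled to a temp first.)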
if (impStackTop().val->gtOper != GT_LCL_VAR) { impSpillStackEntry(verCurrentState.esStackDepth - 1, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall")); } /* Get the function pointer */ GenTree* fptr = impPopStack().val; // The function pointer is typically a sized to match the target pointer size // However, stubgen IL optimization can change LDC.I8 to LDC.I4 // See ILCodeStream::LowerOpcode assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT); #ifdef DEBUG // This temporary must never be converted to a double in stress mode, // because that can introduce a call to the cast helper after the // arguments have already been evaluated. if (fptr->OperGet() == GT_LCL_VAR) { lvaTable[fptr->AsLclVarCommon()->GetLclNum()].lvKeepType = 1; } #endif /* Create the call node */ GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); #ifdef UNIX_X86_ABI call->gtFlags &= ~GTF_CALL_POP_ARGS; #endif return call; } /*****************************************************************************/ void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig) { assert(call->gtFlags & GTF_CALL_UNMANAGED); /* Since we push the arguments in reverse order (i.e. right -> left) * spill any side effects from the stack * * OBS: If there is only one side effect we do not need to spill it * thus we have to spill all side-effects except last one */ unsigned lastLevelWithSideEffects = UINT_MAX; unsigned argsToReverse = sig->numArgs; // For "thiscall", the first argument goes in a register. Since its // order does not need to be changed, we do not need to spill it if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { assert(argsToReverse); argsToReverse--; } #ifndef TARGET_X86 // Don't reverse args on ARM or x64 - first four args always placed in regs in order argsToReverse = 0; #endif for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++) { if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF) { assert(lastLevelWithSideEffects == UINT_MAX); impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect")); } else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) { if (lastLevelWithSideEffects != UINT_MAX) { /* We had a previous side effect - must spill it */ impSpillStackEntry(lastLevelWithSideEffects, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect")); /* Record the level for the current side effect in case we will spill it */ lastLevelWithSideEffects = level; } else { /* This is the first side effect encountered - record its level */ lastLevelWithSideEffects = level; } } } /* The argument list is now "clean" - no out-of-order side effects * Pop the argument list in reverse order */ GenTreeCall::Use* args = impPopReverseCallArgs(sig->numArgs, sig, sig->numArgs - argsToReverse); call->AsCall()->gtCallArgs = args; if (call->AsCall()->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL) { GenTree* thisPtr = args->GetNode(); impBashVarAddrsToI(thisPtr); assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF); } for (GenTreeCall::Use& argUse : GenTreeCall::UseList(args)) { GenTree* arg = argUse.GetNode(); call->gtFlags |= arg->gtFlags & GTF_GLOB_EFFECT; // We should not be passing gc typed args to an unmanaged call. 
if (varTypeIsGC(arg->TypeGet())) { // Tolerate byrefs by retyping to native int. // // This is needed or we'll generate inconsistent GC info // for this arg at the call site (gc info says byref, // pinvoke sig says native int). // if (arg->TypeGet() == TYP_BYREF) { arg->ChangeType(TYP_I_IMPL); } else { assert(!"*** invalid IL: gc ref passed to unmanaged call"); } } } } //------------------------------------------------------------------------ // impInitClass: Build a node to initialize the class before accessing the // field if necessary // // Arguments: // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized // by a call to CEEInfo::resolveToken(). // // Return Value: If needed, a pointer to the node that will perform the class // initializtion. Otherwise, nullptr. // GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle); if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0) { return nullptr; } bool runtimeLookup; GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup); if (node == nullptr) { assert(compDonotInline()); return nullptr; } if (runtimeLookup) { node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(node)); } else { // Call the shared non gc static helper, as its the fastest node = fgGetSharedCCtor(pResolvedToken->hClass); } return node; } GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp) { GenTree* op1 = nullptr; #if defined(DEBUG) // If we're replaying under SuperPMI, we're going to read the data stored by SuperPMI and use it // for optimization. Unfortunately, SuperPMI doesn't implement a guarantee on the alignment of // this data, so for some platforms which don't allow unaligned access (e.g., Linux arm32), // this can fault. We should fix SuperPMI to guarantee alignment, but that is a big change. // Instead, simply fix up the data here for future use. // This variable should be the largest size element, with the largest alignment requirement, // and the native C++ compiler should guarantee sufficient alignment. double aligned_data = 0.0; void* p_aligned_data = &aligned_data; if (info.compMethodSuperPMIIndex != -1) { switch (lclTyp) { case TYP_BOOL: case TYP_BYTE: case TYP_UBYTE: static_assert_no_msg(sizeof(unsigned __int8) == sizeof(bool)); static_assert_no_msg(sizeof(unsigned __int8) == sizeof(signed char)); static_assert_no_msg(sizeof(unsigned __int8) == sizeof(unsigned char)); // No alignment necessary for byte. 
break; case TYP_SHORT: case TYP_USHORT: static_assert_no_msg(sizeof(unsigned __int16) == sizeof(short)); static_assert_no_msg(sizeof(unsigned __int16) == sizeof(unsigned short)); if ((size_t)fldAddr % sizeof(unsigned __int16) != 0) { *(unsigned __int16*)p_aligned_data = GET_UNALIGNED_16(fldAddr); fldAddr = p_aligned_data; } break; case TYP_INT: case TYP_UINT: case TYP_FLOAT: static_assert_no_msg(sizeof(unsigned __int32) == sizeof(int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(unsigned int)); static_assert_no_msg(sizeof(unsigned __int32) == sizeof(float)); if ((size_t)fldAddr % sizeof(unsigned __int32) != 0) { *(unsigned __int32*)p_aligned_data = GET_UNALIGNED_32(fldAddr); fldAddr = p_aligned_data; } break; case TYP_LONG: case TYP_ULONG: case TYP_DOUBLE: static_assert_no_msg(sizeof(unsigned __int64) == sizeof(__int64)); static_assert_no_msg(sizeof(unsigned __int64) == sizeof(double)); if ((size_t)fldAddr % sizeof(unsigned __int64) != 0) { *(unsigned __int64*)p_aligned_data = GET_UNALIGNED_64(fldAddr); fldAddr = p_aligned_data; } break; default: assert(!"Unexpected lclTyp"); break; } } #endif // DEBUG switch (lclTyp) { int ival; __int64 lval; double dval; case TYP_BOOL: ival = *((bool*)fldAddr); goto IVAL_COMMON; case TYP_BYTE: ival = *((signed char*)fldAddr); goto IVAL_COMMON; case TYP_UBYTE: ival = *((unsigned char*)fldAddr); goto IVAL_COMMON; case TYP_SHORT: ival = *((short*)fldAddr); goto IVAL_COMMON; case TYP_USHORT: ival = *((unsigned short*)fldAddr); goto IVAL_COMMON; case TYP_UINT: case TYP_INT: ival = *((int*)fldAddr); IVAL_COMMON: op1 = gtNewIconNode(ival); break; case TYP_LONG: case TYP_ULONG: lval = *((__int64*)fldAddr); op1 = gtNewLconNode(lval); break; case TYP_FLOAT: dval = *((float*)fldAddr); op1 = gtNewDconNode(dval); op1->gtType = TYP_FLOAT; break; case TYP_DOUBLE: dval = *((double*)fldAddr); op1 = gtNewDconNode(dval); break; default: assert(!"Unexpected lclTyp"); break; } return op1; } GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_ACCESS_FLAGS access, CORINFO_FIELD_INFO* pFieldInfo, var_types lclTyp) { // Ordinary static fields never overlap. RVA statics, however, can overlap (if they're // mapped to the same ".data" declaration). That said, such mappings only appear to be // possible with ILASM, and in ILASM-produced (ILONLY) images, RVA statics are always // read-only (using "stsfld" on them is UB). In mixed-mode assemblies, RVA statics can // be mutable, but the only current producer of such images, the C++/CLI compiler, does // not appear to support mapping different fields to the same address. So we will say // that "mutable overlapping RVA statics" are UB as well. If this ever changes, code in // morph and value numbering will need to be updated to respect "gtFldMayOverlap" and // "NotAField FldSeq". // For statics that are not "boxed", the initial address tree will contain the field sequence. // For those that are, we will attach it later, when adding the indirection for the box, since // that tree will represent the true address. bool isBoxedStatic = (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) != 0; FieldSeqNode* innerFldSeq = !isBoxedStatic ? 
GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField) : FieldSeqStore::NotAField(); GenTree* op1; switch (pFieldInfo->fieldAccessor) { case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: { assert(!compIsForInlining()); // We first call a special helper to get the statics base pointer op1 = impParentClassTokenToHandle(pResolvedToken); // compIsForInlining() is false so we should not get NULL here assert(op1 != nullptr); var_types type = TYP_BYREF; switch (pFieldInfo->helper) { case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE: type = TYP_I_IMPL; break; case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE: case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE: break; default: assert(!"unknown generic statics helper"); break; } op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewCallArgs(op1)); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); } break; case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); } else #endif { op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper); } op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); break; } case CORINFO_FIELD_STATIC_READYTORUN_HELPER: { #ifdef FEATURE_READYTORUN assert(opts.IsReadyToRun()); assert(!compIsForInlining()); CORINFO_LOOKUP_KIND kind; info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind); assert(kind.needsRuntimeLookup); GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind); GenTreeCall::Use* args = gtNewCallArgs(ctxTree); GenTreeFlags callFlags = GTF_EMPTY; if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT) { callFlags |= GTF_CALL_HOISTABLE; } var_types type = TYP_BYREF; op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args); op1->gtFlags |= callFlags; op1->AsCall()->setEntryPoint(pFieldInfo->fieldLookup); op1 = gtNewOperNode(GT_ADD, type, op1, gtNewIconNode(pFieldInfo->offset, innerFldSeq)); #else unreached(); #endif // FEATURE_READYTORUN } break; default: { // Do we need the address of a static field? // if (access & CORINFO_ACCESS_ADDRESS) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr); // We should always be able to access this static's address directly. assert(pFldAddr == nullptr); // Create the address node. GenTreeFlags handleKind = isBoxedStatic ? GTF_ICON_STATIC_BOX_PTR : GTF_ICON_STATIC_HDL; op1 = gtNewIconHandleNode((size_t)fldAddr, handleKind, innerFldSeq); #ifdef DEBUG op1->AsIntCon()->gtTargetHandle = op1->AsIntCon()->gtIconVal; #endif if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_ICON_INITCLASS; } } else // We need the value of a static field { // In future, it may be better to just create the right tree here instead of folding it later. 
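// For boxed statics (CORINFO_FLG_FIELD_STATIC_IN_HEAP) the value lives in the payload of a heap-allocated
// box object, so the code below adds an extra indirection and offsets past the box's method table pointer
// (TARGET_POINTER_SIZE) to reach the field itself.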
op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField); if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { op1->gtFlags |= GTF_FLD_INITCLASS; } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1->ChangeType(TYP_REF); // points at boxed object op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING; } } return op1; } break; } } if (isBoxedStatic) { FieldSeqNode* outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(TARGET_POINTER_SIZE, outerFldSeq)); } if (!(access & CORINFO_ACCESS_ADDRESS)) { if (varTypeIsStruct(lclTyp)) { // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT. op1 = gtNewObjNode(pFieldInfo->structType, op1); } else { op1 = gtNewOperNode(GT_IND, lclTyp, op1); op1->gtFlags |= GTF_GLOB_REF; } } return op1; } // In general try to call this before most of the verification work. Most people expect the access // exceptions before the verification exceptions. If you do this after, that usually doesn't happen. Turns // out if you can't access something we also think that you're unverifiable for other reasons. void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { if (result != CORINFO_ACCESS_ALLOWED) { impHandleAccessAllowedInternal(result, helperCall); } } void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall) { switch (result) { case CORINFO_ACCESS_ALLOWED: break; case CORINFO_ACCESS_ILLEGAL: // if we're verifying, then we need to reject the illegal access to ensure that we don't think the // method is verifiable. Otherwise, delay the exception to runtime. 
if (compIsForImportOnly()) { info.compCompHnd->ThrowExceptionForHelper(helperCall); } else { impInsertHelperCall(helperCall); } break; } } void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo) { // Construct the argument list GenTreeCall::Use* args = nullptr; assert(helperInfo->helperNum != CORINFO_HELP_UNDEF); for (unsigned i = helperInfo->numArgs; i > 0; --i) { const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1]; GenTree* currentArg = nullptr; switch (helperArg.argType) { case CORINFO_HELPER_ARG_TYPE_Field: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun( info.compCompHnd->getFieldClass(helperArg.fieldHandle)); currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle); break; case CORINFO_HELPER_ARG_TYPE_Method: info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle); currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle); break; case CORINFO_HELPER_ARG_TYPE_Class: info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle); currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle); break; case CORINFO_HELPER_ARG_TYPE_Module: currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle); break; case CORINFO_HELPER_ARG_TYPE_Const: currentArg = gtNewIconNode(helperArg.constant); break; default: NO_WAY("Illegal helper arg type"); } args = gtPrependNewCallArg(currentArg, args); } /* TODO-Review: * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee. * Also, consider sticking this in the first basic block. */ GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args); impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } //------------------------------------------------------------------------ // impTailCallRetTypeCompatible: Checks whether the return types of caller // and callee are compatible so that calle can be tail called. // sizes are not supported integral type sizes return values to temps. // // Arguments: // allowWidening -- whether to allow implicit widening by the callee. // For instance, allowing int32 -> int16 tailcalls. // The managed calling convention allows this, but // we don't want explicit tailcalls to depend on this // detail of the managed calling convention. // callerRetType -- the caller's return type // callerRetTypeClass - the caller's return struct type // callerCallConv -- calling convention of the caller // calleeRetType -- the callee's return type // calleeRetTypeClass - the callee return struct type // calleeCallConv -- calling convention of the callee // // Returns: // True if the tailcall types are compatible. // // Remarks: // Note that here we don't check compatibility in IL Verifier sense, but on the // lines of return types getting returned in the same return register. bool Compiler::impTailCallRetTypeCompatible(bool allowWidening, var_types callerRetType, CORINFO_CLASS_HANDLE callerRetTypeClass, CorInfoCallConvExtension callerCallConv, var_types calleeRetType, CORINFO_CLASS_HANDLE calleeRetTypeClass, CorInfoCallConvExtension calleeCallConv) { // Early out if the types are the same. if (callerRetType == calleeRetType) { return true; } // For integral types the managed calling convention dictates that callee // will widen the return value to 4 bytes, so we can allow implicit widening // in managed to managed tailcalls when dealing with <= 4 bytes. 
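//    (E.g. on x64 two unrelated 8-byte structs that are both returned in RAX compare as compatible here,
//    even though the IL types differ.)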
bool isManaged = (callerCallConv == CorInfoCallConvExtension::Managed) && (calleeCallConv == CorInfoCallConvExtension::Managed); if (allowWidening && isManaged && varTypeIsIntegral(callerRetType) && varTypeIsIntegral(calleeRetType) && (genTypeSize(callerRetType) <= 4) && (genTypeSize(calleeRetType) <= genTypeSize(callerRetType))) { return true; } // If the class handles are the same and not null, the return types are compatible. if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass)) { return true; } #if defined(TARGET_AMD64) || defined(TARGET_ARM64) // Jit64 compat: if (callerRetType == TYP_VOID) { // This needs to be allowed to support the following IL pattern that Jit64 allows: // tail.call // pop // ret // // Note that the above IL pattern is not valid as per IL verification rules. // Therefore, only full trust code can take advantage of this pattern. return true; } // These checks return true if the return value type sizes are the same and // get returned in the same return register i.e. caller doesn't need to normalize // return value. Some of the tail calls permitted by below checks would have // been rejected by IL Verifier before we reached here. Therefore, only full // trust code can make those tail calls. unsigned callerRetTypeSize = 0; unsigned calleeRetTypeSize = 0; bool isCallerRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true, info.compIsVarArgs, callerCallConv); bool isCalleeRetTypMBEnreg = VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true, info.compIsVarArgs, calleeCallConv); if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg) { return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize); } #endif // TARGET_AMD64 || TARGET_ARM64 return false; } /******************************************************************************** * * Returns true if the current opcode and and the opcodes following it correspond * to a supported tail call IL pattern. * */ bool Compiler::impIsTailCallILPattern( bool tailPrefixed, OPCODE curOpcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, bool isRecursive) { // Bail out if the current opcode is not a call. if (!impOpcodeIsCallOpcode(curOpcode)) { return false; } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // If shared ret tail opt is not enabled, we will enable // it for recursive methods. if (isRecursive) #endif { // we can actually handle if the ret is in a fallthrough block, as long as that is the only part of the // sequence. Make sure we don't go past the end of the IL however. 
codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize); } // Bail out if there is no next opcode after call if (codeAddrOfNextOpcode >= codeEnd) { return false; } OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode); return (nextOpcode == CEE_RET); } /***************************************************************************** * * Determine whether the call could be converted to an implicit tail call * */ bool Compiler::impIsImplicitTailCallCandidate( OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive) { #if FEATURE_TAILCALL_OPT if (!opts.compTailCallOpt) { return false; } if (opts.OptimizationDisabled()) { return false; } // must not be tail prefixed if (prefixFlags & PREFIX_TAILCALL_EXPLICIT) { return false; } #if !FEATURE_TAILCALL_OPT_SHARED_RETURN // the block containing call is marked as BBJ_RETURN // We allow shared ret tail call optimization on recursive calls even under // !FEATURE_TAILCALL_OPT_SHARED_RETURN. if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN)) return false; #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN // must be call+ret or call+pop+ret if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive)) { return false; } return true; #else return false; #endif // FEATURE_TAILCALL_OPT } //------------------------------------------------------------------------ // impImportCall: import a call-inspiring opcode // // Arguments: // opcode - opcode that inspires the call // pResolvedToken - resolved token for the call target // pConstrainedResolvedToken - resolved constraint token (or nullptr) // newObjThis - tree for this pointer or uninitalized newobj temp (or nullptr) // prefixFlags - IL prefix flags for the call // callInfo - EE supplied info for the call // rawILOffset - IL offset of the opcode, used for guarded devirtualization. // // Returns: // Type of the call's return value. // If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF. // However we can't assert for this here yet because there are cases we miss. See issue #13272. // // // Notes: // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ. // // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated // uninitalized object. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif var_types Compiler::impImportCall(OPCODE opcode, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, GenTree* newobjThis, int prefixFlags, CORINFO_CALL_INFO* callInfo, IL_OFFSET rawILOffset) { assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI); // The current statement DI may not refer to the exact call, but for calls // we wish to be able to attach the exact IL instruction to get "return // value" support in the debugger, so create one with the exact IL offset. 
DebugInfo di = impCreateDIWithCurrentStackInfo(rawILOffset, true); var_types callRetTyp = TYP_COUNT; CORINFO_SIG_INFO* sig = nullptr; CORINFO_METHOD_HANDLE methHnd = nullptr; CORINFO_CLASS_HANDLE clsHnd = nullptr; unsigned clsFlags = 0; unsigned mflags = 0; GenTree* call = nullptr; GenTreeCall::Use* args = nullptr; CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM; CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr; bool exactContextNeedsRuntimeLookup = false; bool canTailCall = true; const char* szCanTailCallFailReason = nullptr; const int tailCallFlags = (prefixFlags & PREFIX_TAILCALL); const bool isReadonlyCall = (prefixFlags & PREFIX_READONLY) != 0; CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr; // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could // do that before tailcalls, but that is probably not the intended // semantic. So just disallow tailcalls from synchronized methods. // Also, popping arguments in a varargs function is more work and NYI // If we have a security object, we have to keep our frame around for callers // to see any imperative security. // Reverse P/Invokes need a call to CORINFO_HELP_JIT_REVERSE_PINVOKE_EXIT // at the end, so tailcalls should be disabled. if (info.compFlags & CORINFO_FLG_SYNCH) { canTailCall = false; szCanTailCallFailReason = "Caller is synchronized"; } else if (opts.IsReversePInvoke()) { canTailCall = false; szCanTailCallFailReason = "Caller is Reverse P/Invoke"; } #if !FEATURE_FIXED_OUT_ARGS else if (info.compIsVarArgs) { canTailCall = false; szCanTailCallFailReason = "Caller is varargs"; } #endif // FEATURE_FIXED_OUT_ARGS // We only need to cast the return value of pinvoke inlined calls that return small types // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there. // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for // the time being that the callee might be compiled by the other JIT and thus the return // value will need to be widened by us (or not widened at all...) // ReadyToRun code sticks with default calling convention that does not widen small return types. bool checkForSmallType = opts.IsReadyToRun(); bool bIntrinsicImported = false; CORINFO_SIG_INFO calliSig; GenTreeCall::Use* extraArg = nullptr; /*------------------------------------------------------------------------- * First create the call node */ if (opcode == CEE_CALLI) { if (IsTargetAbi(CORINFO_CORERT_ABI)) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block))) { eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo); return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset); } } /* Get the call site sig */ eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig); callRetTyp = JITtype2varType(calliSig.retType); call = impImportIndirectCall(&calliSig, di); // We don't know the target method, so we have to infer the flags, or // assume the worst-case. mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? 
info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif sig = &calliSig; } else // (opcode != CEE_CALLI) { NamedIntrinsic ni = NI_Illegal; // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to // supply the instantiation parameters necessary to make direct calls to underlying // shared generic code, rather than calling through instantiating stubs. If the // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT // must indeed pass an instantiation parameter. methHnd = callInfo->hMethod; sig = &(callInfo->sig); callRetTyp = JITtype2varType(sig->retType); mflags = callInfo->methodFlags; #ifdef DEBUG if (verbose) { unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0; printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n", opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize); } #endif if (compIsForInlining()) { /* Does the inlinee use StackCrawlMark */ if (mflags & CORINFO_FLG_DONT_INLINE_CALLER) { compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK); return TYP_UNDEF; } /* For now ignore varargs */ if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS); return TYP_UNDEF; } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return TYP_UNDEF; } if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT)) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL); return TYP_UNDEF; } } clsHnd = pResolvedToken->hClass; clsFlags = callInfo->classFlags; #ifdef DEBUG // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute. // This recognition should really be done by knowing the methHnd of the relevant Mark method(s). // These should be in corelib.h, and available through a JIT/EE interface call. 
const char* modName; const char* className; const char* methodName; if ((className = eeGetClassName(clsHnd)) != nullptr && strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 && (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0) { return impImportJitTestLabelMark(sig->numArgs); } #endif // DEBUG // <NICE> Factor this into getCallInfo </NICE> bool isSpecialIntrinsic = false; if ((mflags & CORINFO_FLG_INTRINSIC) != 0) { const bool isTailCall = canTailCall && (tailCallFlags != 0); call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, isReadonlyCall, isTailCall, pConstrainedResolvedToken, callInfo->thisTransform, &ni, &isSpecialIntrinsic); if (compDonotInline()) { return TYP_UNDEF; } if (call != nullptr) { #ifdef FEATURE_READYTORUN if (call->OperGet() == GT_INTRINSIC) { if (opts.IsReadyToRun()) { noway_assert(callInfo->kind == CORINFO_CALL); call->AsIntrinsic()->gtEntryPoint = callInfo->codePointerLookup.constLookup; } else { call->AsIntrinsic()->gtEntryPoint.addr = nullptr; call->AsIntrinsic()->gtEntryPoint.accessType = IAT_VALUE; } } #endif bIntrinsicImported = true; goto DONE_CALL; } } #ifdef FEATURE_SIMD if (featureSIMD) { call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token); if (call != nullptr) { bIntrinsicImported = true; goto DONE_CALL; } } #endif // FEATURE_SIMD if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT)) { NO_WAY("Virtual call to a function added via EnC is not supported"); } if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG) { BADCODE("Bad calling convention"); } //------------------------------------------------------------------------- // Construct the call node // // Work out what sort of call we're making. // Dispense with virtual calls implemented via LDVIRTFTN immediately. constraintCallThisTransform = callInfo->thisTransform; exactContextHnd = callInfo->contextHandle; exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup; switch (callInfo->kind) { case CORINFO_VIRTUALCALL_STUB: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); if (callInfo->stubLookup.lookupKind.needsRuntimeLookup) { if (callInfo->stubLookup.lookupKind.runtimeLookupKind == CORINFO_LOOKUP_NOT_SUPPORTED) { // Runtime does not support inlining of all shapes of runtime lookups // Inlining has to be aborted in such a case compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE); return TYP_UNDEF; } GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd); assert(!compDonotInline()); // This is the rough code to set up an indirect stub call assert(stubAddr != nullptr); // The stubAddr may be a // complex expression. As it is evaluated after the args, // it may cause registered args to be spilled. Simply spill it.
unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup")); impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_NONE); stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr); call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT); call->gtFlags |= GTF_CALL_VIRT_STUB; #ifdef TARGET_X86 // No tailcalls allowed for these yet... canTailCall = false; szCanTailCallFailReason = "VirtualCall with runtime lookup"; #endif } else { // The stub address is known at compile time call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->AsCall()->gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr; call->gtFlags |= GTF_CALL_VIRT_STUB; assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE && callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE); if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT; } } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is sometimes needed for ready to run to handle // non-virtual <-> virtual changes between versions if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } } #endif break; } case CORINFO_VIRTUALCALL_VTABLE: { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); call->gtFlags |= GTF_CALL_VIRT_VTABLE; // Should we expand virtual call targets early for this method? // if (opts.compExpandCallsEarly) { // Mark this method to expand the virtual call target early in fgMorphCall call->AsCall()->SetExpandedEarly(); } break; } case CORINFO_VIRTUALCALL_LDVIRTFTN: { if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN); return TYP_UNDEF; } assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(!(clsFlags & CORINFO_FLG_VALUECLASS)); // OK, we've been told to call via LDVIRTFTN, so just // take the call now....
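// (Roughly, the code below builds the following shape; the names are for exposition only:
//      thisCopy = clone(this)
//      fptr     = ldvirtftn(this, method)    // via impImportLdvirtftn
//      tmp      = fptr                       // spilled to a new local
//      calli    tmp(thisCopy, args...)       // indirect call node
//  i.e. the virtual dispatch is materialized as an explicit function-pointer load followed by
//  an indirect call.)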
GenTreeCall::Use* args = impPopCallArgs(sig->numArgs, sig); GenTree* thisPtr = impPopStack().val; thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform); assert(thisPtr != nullptr); // Clone the (possibly transformed) "this" pointer GenTree* thisPtrCopy; thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("LDVIRTFTN this pointer")); GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo); assert(fptr != nullptr); thisPtr = nullptr; // can't reuse it // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); // Create the actual call node call = gtNewIndCallNode(fptr, callRetTyp, args, di); call->AsCall()->gtCallThisArg = gtNewCallArgs(thisPtrCopy); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI)) { // CoreRT generic virtual method: need to handle potential fat function pointers addFatPointerCandidate(call->AsCall()); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // Null check is needed for ready to run to handle // non-virtual <-> virtual changes between versions call->gtFlags |= GTF_CALL_NULLCHECK; } #endif // Since we are jumping over some code, check that it's OK to skip that code assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG && (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); goto DONE; } case CORINFO_CALL: { // This is for a non-virtual, non-interface etc. call call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, di); // We remove the nullcheck for the GetType call intrinsic. // TODO-CQ: JIT64 does not introduce the null check for many more helper calls // and intrinsics. if (callInfo->nullInstanceCheck && !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (ni == NI_System_Object_GetType))) { call->gtFlags |= GTF_CALL_NULLCHECK; } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { call->AsCall()->setEntryPoint(callInfo->codePointerLookup.constLookup); } #endif break; } case CORINFO_CALL_CODE_POINTER: { // The EE has asked us to call by computing a code pointer and then doing an // indirect call. This is because a runtime lookup is required to get the code entry point. // These calls always follow a uniform calling convention, i.e.
no extra hidden params assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG); assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG); GenTree* fptr = impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod); if (compDonotInline()) { return TYP_UNDEF; } // Now make an indirect call through the function pointer unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer")); impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL); fptr = gtNewLclvNode(lclNum, TYP_I_IMPL); call = gtNewIndCallNode(fptr, callRetTyp, nullptr, di); call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT); if (callInfo->nullInstanceCheck) { call->gtFlags |= GTF_CALL_NULLCHECK; } break; } default: assert(!"unknown call kind"); break; } //------------------------------------------------------------------------- // Set more flags PREFIX_ASSUME(call != nullptr); if (mflags & CORINFO_FLG_NOGCCHECK) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK; } // Mark call if it's one of the ones we will maybe treat as an intrinsic if (isSpecialIntrinsic) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC; } } assert(sig); assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set. /* Some sanity checks */ // CALL_VIRT and NEWOBJ must have a THIS pointer assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS)); // static bit and hasThis are negations of one another assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0)); assert(call != nullptr); /*------------------------------------------------------------------------- * Check special-cases etc */ /* Special case - Check if it is a call to Delegate.Invoke(). */ if (mflags & CORINFO_FLG_DELEGATE_INVOKE) { assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method assert(mflags & CORINFO_FLG_FINAL); /* Set the delegate flag */ call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV; if (callInfo->wrapperDelegateInvoke) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_WRAPPER_DELEGATE_INV; } if (opcode == CEE_CALLVIRT) { assert(mflags & CORINFO_FLG_FINAL); /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */ assert(call->gtFlags & GTF_CALL_NULLCHECK); call->gtFlags &= ~GTF_CALL_NULLCHECK; } } CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass; actualMethodRetTypeSigClass = sig->retTypeSigClass; /* Check for varargs */ if (!compFeatureVarArg() && ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)) { BADCODE("Varargs not supported."); } if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG || (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG) { assert(!compIsForInlining()); /* Set the right flags */ call->gtFlags |= GTF_CALL_POP_ARGS; call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_VARARGS; /* Can't allow tailcall for varargs as it is caller-pop. The caller will be expecting to pop a certain number of arguments, but if we tailcall to a function with a different number of arguments, we are hosed. There are ways around this (caller remembers esp value, varargs is not caller-pop, etc), but not worth it. 
*/ CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_X86 if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is varargs"; } #endif /* Get the total number of arguments - this is already correct * for CALLI - for methods we have to get it from the call site */ if (opcode != CEE_CALLI) { #ifdef DEBUG unsigned numArgsDef = sig->numArgs; #endif eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); // For vararg calls we must be sure to load the return type of the // method actually being called, as well as the return types // specified in the vararg signature. With type equivalency, these types // may not be the same. if (sig->retTypeSigClass != actualMethodRetTypeSigClass) { if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS && sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR) { // Make sure that all valuetypes (including enums) that we push are loaded. // This is to guarantee that if a GC is triggered from the prestub of this method, // all valuetypes in the method signature are already loaded. // We need to be able to find the size of the valuetypes, but we cannot // do a class-load from within GC. info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass); } } assert(numArgsDef <= sig->numArgs); } /* We will have "cookie" as the last argument but we cannot push * it on the operand stack because we may overflow, so we append it * to the arg list next after we pop them */ } //--------------------------- Inline NDirect ------------------------------ // For inline cases we technically should look at both the current // block and the call site block (or just the latter if we've // fused the EH trees). However the block-related checks pertain to // EH and we currently won't inline a method with EH. So for // inlinees, just checking the call site block is sufficient. { // New lexical block here to avoid compilation errors because of GOTOs. BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block); } #ifdef UNIX_X86_ABI // On Unix x86 we use caller-cleaned convention. if ((call->gtFlags & GTF_CALL_UNMANAGED) == 0) call->gtFlags |= GTF_CALL_POP_ARGS; #endif // UNIX_X86_ABI if (call->gtFlags & GTF_CALL_UNMANAGED) { // We set up the unmanaged call by linking the frame, disabling GC, etc // This needs to be cleaned up on return. // In addition, native calls have different normalization rules than managed code // (managed calling convention always widens return values in the callee) if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "Callee is native"; } checkForSmallType = true; impPopArgsForUnmanagedCall(call, sig); goto DONE; } else if ((opcode == CEE_CALLI) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT) && ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG)) { if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig)) { // Normally this only happens with inlining. // However, a generic method (or type) being NGENd into another module // can run into this issue as well. There's not an easy fall-back for NGEN // so instead we fall back to JIT.
if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE); } else { IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)"); } return TYP_UNDEF; } GenTree* cookie = eeGetPInvokeCookie(sig); // This cookie is required to be either a simple GT_CNS_INT or // an indirection of a GT_CNS_INT // GenTree* cookieConst = cookie; if (cookie->gtOper == GT_IND) { cookieConst = cookie->AsOp()->gtOp1; } assert(cookieConst->gtOper == GT_CNS_INT); // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that // we won't allow this tree to participate in any CSE logic // cookie->gtFlags |= GTF_DONT_CSE; cookieConst->gtFlags |= GTF_DONT_CSE; call->AsCall()->gtCallCookie = cookie; if (canTailCall) { canTailCall = false; szCanTailCallFailReason = "PInvoke calli"; } } /*------------------------------------------------------------------------- * Create the argument list */ //------------------------------------------------------------------------- // Special case - for varargs we have an implicit last argument if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG) { assert(!compIsForInlining()); void *varCookie, *pVarCookie; if (!info.compCompHnd->canGetVarArgsHandle(sig)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE); return TYP_UNDEF; } varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie); assert((!varCookie) != (!pVarCookie)); GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig); assert(extraArg == nullptr); extraArg = gtNewCallArgs(cookie); } //------------------------------------------------------------------------- // Extra arg for shared generic code and array methods // // Extra argument containing instantiation information is passed in the // following circumstances: // (a) To the "Address" method on array classes; the extra parameter is // the array's type handle (a TypeDesc) // (b) To shared-code instance methods in generic structs; the extra parameter // is the struct's type handle (a vtable ptr) // (c) To shared-code per-instantiation non-generic static methods in generic // classes and structs; the extra parameter is the type handle // (d) To shared-code generic methods; the extra parameter is an // exact-instantiation MethodDesc // // We also set the exact type context associated with the call so we can // inline the call correctly later on. 
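// (Illustrative example for case (d): when the shared code for a generic method M<__Canon>
// runs on behalf of M<string>, the hidden argument is the exact MethodDesc for M<string>,
// letting the shared body recover the precise instantiation at run time.)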
if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE) { assert(call->AsCall()->gtCallType == CT_USER_FUNC); if (clsHnd == nullptr) { NO_WAY("CALLI on parameterized type"); } assert(opcode != CEE_CALLI); GenTree* instParam; bool runtimeLookup; // Instantiated generic method if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD) { assert(exactContextHnd != METHOD_BEING_COMPILED_CONTEXT()); CORINFO_METHOD_HANDLE exactMethodHandle = (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK); if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbMethHndNode(exactMethodHandle); info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle); } } else { instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } // otherwise must be an instance method in a generic struct, // a static method in a generic type, or a runtime-generated array method else { assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); CORINFO_CLASS_HANDLE exactClassHandle = eeGetClassFromContext(exactContextHnd); if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0) { compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD); return TYP_UNDEF; } if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall) { // We indicate "readonly" to the Address operation by using a null // instParam. instParam = gtNewIconNode(0, TYP_REF); } else if (!exactContextNeedsRuntimeLookup) { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } else #endif { instParam = gtNewIconEmbClsHndNode(exactClassHandle); info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle); } } else { instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, true /*mustRestoreHandle*/); if (instParam == nullptr) { assert(compDonotInline()); return TYP_UNDEF; } } } assert(extraArg == nullptr); extraArg = gtNewCallArgs(instParam); } if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0)) { // Only verifiable cases are supported. // dup; ldvirtftn; newobj; or ldftn; newobj. // IL test could contain unverifiable sequence, in this case optimization should not be done. 
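// (The verifiable sequences referred to above look roughly like the following; the names
//  C, M and D are illustrative only:
//      ldftn      void C::M(...)
//      newobj     instance void D::.ctor(object, native int)
//  or, for a virtual target:
//      dup
//      ldvirtftn  instance void C::M(...)
//      newobj     instance void D::.ctor(object, native int)
//  The token pushed by ldftn/ldvirtftn is what the stack inspection below picks up.)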
if (impStackHeight() > 0) { typeInfo delegateTypeInfo = impStackTop().seTypeInfo; if (delegateTypeInfo.IsToken()) { ldftnToken = delegateTypeInfo.GetToken(); } } } //------------------------------------------------------------------------- // The main group of arguments args = impPopCallArgs(sig->numArgs, sig, extraArg); call->AsCall()->gtCallArgs = args; for (GenTreeCall::Use& use : call->AsCall()->Args()) { call->gtFlags |= use.GetNode()->gtFlags & GTF_GLOB_EFFECT; } //------------------------------------------------------------------------- // The "this" pointer if (((mflags & CORINFO_FLG_STATIC) == 0) && ((sig->callConv & CORINFO_CALLCONV_EXPLICITTHIS) == 0) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr))) { GenTree* obj; if (opcode == CEE_NEWOBJ) { obj = newobjThis; } else { obj = impPopStack().val; obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform); if (compDonotInline()) { return TYP_UNDEF; } } // Store the "this" value in the call call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT; call->AsCall()->gtCallThisArg = gtNewCallArgs(obj); // Is this a virtual or interface call? if (call->AsCall()->IsVirtual()) { // only true object pointers can be virtual assert(obj->gtType == TYP_REF); // See if we can devirtualize. const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isLateDevirtualization = false; impDevirtualizeCall(call->AsCall(), pResolvedToken, &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle, &exactContextHnd, isLateDevirtualization, isExplicitTailCall, // Take care to pass raw IL offset here as the 'debug info' might be different for // inlinees. rawILOffset); // Devirtualization may change which method gets invoked. Update our local cache. // methHnd = callInfo->hMethod; } if (impIsThis(obj)) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS; } } //------------------------------------------------------------------------- // The "this" pointer for "newobj" if (opcode == CEE_NEWOBJ) { if (clsFlags & CORINFO_FLG_VAROBJSIZE) { assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately // This is a 'new' of a variable sized object, where // the constructor is to return the object. In this case // the constructor claims to return VOID but we know it // actually returns the new object assert(callRetTyp == TYP_VOID); callRetTyp = TYP_REF; call->gtType = TYP_REF; impSpillSpecialSideEff(); impPushOnStack(call, typeInfo(TI_REF, clsHnd)); } else { if (clsFlags & CORINFO_FLG_DELEGATE) { // The inliner morphs it here in impImportCall. // This will allow us to inline the call to the delegate constructor. call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken); } if (!bIntrinsicImported) { #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // append the call node. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Now push the value of the 'new' onto the stack // This is a 'new' of a non-variable sized object. // Append the new node (op1) to the statement list, // and then push the local holding the value of this // new instruction on the stack.
if (clsFlags & CORINFO_FLG_VALUECLASS) { assert(newobjThis->gtOper == GT_ADDR && newobjThis->AsOp()->gtOp1->gtOper == GT_LCL_VAR); unsigned tmp = newobjThis->AsOp()->gtOp1->AsLclVarCommon()->GetLclNum(); impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack()); } else { if (newobjThis->gtOper == GT_COMMA) { // We must have inserted the callout. Get the real newobj. newobjThis = newobjThis->AsOp()->gtOp2; } assert(newobjThis->gtOper == GT_LCL_VAR); impPushOnStack(gtNewLclvNode(newobjThis->AsLclVarCommon()->GetLclNum(), TYP_REF), typeInfo(TI_REF, clsHnd)); } } return callRetTyp; } DONE: #ifdef DEBUG // In debug we want to be able to register callsites with the EE. assert(call->AsCall()->callSig == nullptr); call->AsCall()->callSig = new (this, CMK_Generic) CORINFO_SIG_INFO; *call->AsCall()->callSig = *sig; #endif // Final importer checks for calls flagged as tail calls. // if (tailCallFlags != 0) { const bool isExplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_EXPLICIT) != 0; const bool isImplicitTailCall = (tailCallFlags & PREFIX_TAILCALL_IMPLICIT) != 0; const bool isStressTailCall = (tailCallFlags & PREFIX_TAILCALL_STRESS) != 0; // Exactly one of these should be true. assert(isExplicitTailCall != isImplicitTailCall); // This check cannot be performed for implicit tail calls for the reason // that impIsImplicitTailCallCandidate() is not checking whether return // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT. // As a result it is possible that in the following case, we find that // the type stack is non-empty if Callee() is considered for implicit // tail calling. // int Caller(..) { .... void Callee(); ret val; ... } // // Note that we cannot check return type compatibility before ImpImportCall() // as we don't have required info or need to duplicate some of the logic of // ImpImportCall(). // // For implicit tail calls, we perform this check after return types are // known to be compatible. if (isExplicitTailCall && (verCurrentState.esStackDepth != 0)) { BADCODE("Stack should be empty after tailcall"); } // For opportunistic tailcalls we allow implicit widening, i.e. tailcalls from int32 -> int16, since the // managed calling convention dictates that the callee widens the value. For explicit tailcalls we don't // want to require this detail of the calling convention to bubble up to the tailcall helpers bool allowWidening = isImplicitTailCall; if (canTailCall && !impTailCallRetTypeCompatible(allowWidening, info.compRetType, info.compMethodInfo->args.retTypeClass, info.compCallConv, callRetTyp, sig->retTypeClass, call->AsCall()->GetUnmanagedCallConv())) { canTailCall = false; szCanTailCallFailReason = "Return types are not tail call compatible"; } // Stack empty check for implicit tail calls. if (canTailCall && isImplicitTailCall && (verCurrentState.esStackDepth != 0)) { #ifdef TARGET_AMD64 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException // in JIT64, not an InvalidProgramException. Verify(false, "Stack should be empty after tailcall"); #else // TARGET_64BIT BADCODE("Stack should be empty after tailcall"); #endif //! TARGET_64BIT } // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN); // Ask VM for permission to tailcall if (canTailCall) { // True virtual or indirect calls, shouldn't pass in a callee handle. 
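// (For virtual and indirect calls the exact target is not known at this point, so only the
// declared method handle is reported to the runtime and exactCalleeHnd below is left null.)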
CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->AsCall()->gtCallType != CT_USER_FUNC) || call->AsCall()->IsVirtual()) ? nullptr : methHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, isExplicitTailCall)) { if (isExplicitTailCall) { // In case of explicit tail calls, mark it so that it is not considered // for in-lining. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_EXPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); if (isStressTailCall) { call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_STRESS_TAILCALL; JITDUMP("\nGTF_CALL_M_STRESS_TAILCALL set for call [%06u]\n", dspTreeID(call)); } } else { #if FEATURE_TAILCALL_OPT // Must be an implicit tail call. assert(isImplicitTailCall); // It is possible that a call node is both an inline candidate and marked // for opportunistic tail calling. In-lining happens before morhphing of // trees. If in-lining of an in-line candidate gets aborted for whatever // reason, it will survive to the morphing stage at which point it will be // transformed into a tail call after performing additional checks. call->AsCall()->gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL; JITDUMP("\nGTF_CALL_M_IMPLICIT_TAILCALL set for call [%06u]\n", dspTreeID(call)); #else //! FEATURE_TAILCALL_OPT NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls"); #endif // FEATURE_TAILCALL_OPT } // This might or might not turn into a tailcall. We do more // checks in morph. For explicit tailcalls we need more // information in morph in case it turns out to be a // helper-based tailcall. if (isExplicitTailCall) { assert(call->AsCall()->tailCallInfo == nullptr); call->AsCall()->tailCallInfo = new (this, CMK_CorTailCallInfo) TailCallSiteInfo; switch (opcode) { case CEE_CALLI: call->AsCall()->tailCallInfo->SetCalli(sig); break; case CEE_CALLVIRT: call->AsCall()->tailCallInfo->SetCallvirt(sig, pResolvedToken); break; default: call->AsCall()->tailCallInfo->SetCall(sig, pResolvedToken); break; } } } else { // canTailCall reported its reasons already canTailCall = false; JITDUMP("\ninfo.compCompHnd->canTailCall returned false for call [%06u]\n", dspTreeID(call)); } } else { // If this assert fires it means that canTailCall was set to false without setting a reason! assert(szCanTailCallFailReason != nullptr); JITDUMP("\nRejecting %splicit tail call for [%06u], reason: '%s'\n", isExplicitTailCall ? "ex" : "im", dspTreeID(call), szCanTailCallFailReason); info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, isExplicitTailCall, TAILCALL_FAIL, szCanTailCallFailReason); } } // Note: we assume that small return types are already normalized by the managed callee // or by the pinvoke stub for calls to unmanaged code. if (!bIntrinsicImported) { // // Things needed to be checked when bIntrinsicImported is false. // assert(call->gtOper == GT_CALL); assert(callInfo != nullptr); if (compIsForInlining() && opcode == CEE_CALLVIRT) { GenTree* callObj = call->AsCall()->gtCallThisArg->GetNode(); if ((call->AsCall()->IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, call->AsCall()->gtCallArgs, callObj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } #if defined(DEBUG) || defined(INLINE_DATA) // Keep track of the raw IL offset of the call call->AsCall()->gtRawILOffset = rawILOffset; #endif // defined(DEBUG) || defined(INLINE_DATA) // Is it an inline candidate? 
impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); } // Extra checks for tail calls and tail recursion. // // A tail recursive call is a potential loop from the current block to the start of the root method. // If we see a tail recursive call, mark the blocks from the call site back to the entry as potentially // being in a loop. // // Note: if we're importing an inlinee we don't mark the right set of blocks, but by then it's too // late. Currently this doesn't lead to problems. See GitHub issue 33529. // // OSR also needs to handle tail calls specially: // * block profiling in OSR methods needs to ensure probes happen before tail calls, not after. // * the root method entry must be imported if there's a recursive tail call or a potentially // inlineable tail call. // if ((tailCallFlags != 0) && canTailCall) { if (gtIsRecursiveCall(methHnd)) { assert(verCurrentState.esStackDepth == 0); BasicBlock* loopHead = nullptr; if (!compIsForInlining() && opts.IsOSR()) { // For root method OSR we may branch back to the actual method entry, // which is not fgFirstBB, and which we will need to import. assert(fgEntryBB != nullptr); loopHead = fgEntryBB; } else { // For normal jitting we may branch back to the firstBB; this // should already be imported. loopHead = fgFirstBB; } JITDUMP("\nTail recursive call [%06u] in the method. Mark " FMT_BB " to " FMT_BB " as having a backward branch.\n", dspTreeID(call), loopHead->bbNum, compCurBB->bbNum); fgMarkBackwardJump(loopHead, compCurBB); } // We only do these OSR checks in the root method because: // * If we fail to import the root method entry when importing the root method, we can't go back // and import it during inlining. So instead of checking jsut for recursive tail calls we also // have to check for anything that might introduce a recursive tail call. // * We only instrument root method blocks in OSR methods, // if (opts.IsOSR() && !compIsForInlining()) { // If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile // instrumentation. // if (compCurBB->bbJumpKind != BBJ_RETURN) { BasicBlock* const successor = compCurBB->GetUniqueSucc(); assert(successor->bbJumpKind == BBJ_RETURN); successor->bbFlags |= BBF_TAILCALL_SUCCESSOR; optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR; } // If this call might eventually turn into a loop back to method entry, make sure we // import the method entry. // assert(call->IsCall()); GenTreeCall* const actualCall = call->AsCall(); const bool mustImportEntryBlock = gtIsRecursiveCall(methHnd) || actualCall->IsInlineCandidate() || actualCall->IsGuardedDevirtualizationCandidate(); // Only schedule importation if we're not currently importing. // if (mustImportEntryBlock && (compCurBB != fgEntryBB)) { JITDUMP("\nOSR: inlineable or recursive tail call [%06u] in the method, so scheduling " FMT_BB " for importation\n", dspTreeID(call), fgEntryBB->bbNum); impImportBlockPending(fgEntryBB); } } } if ((sig->flags & CORINFO_SIGFLAG_FAT_CALL) != 0) { assert(opcode == CEE_CALLI || callInfo->kind == CORINFO_CALL_CODE_POINTER); addFatPointerCandidate(call->AsCall()); } DONE_CALL: // Push or append the result of the call if (callRetTyp == TYP_VOID) { if (opcode == CEE_NEWOBJ) { // we actually did push something, so don't spill the thing we just pushed. 
assert(verCurrentState.esStackDepth > 0); impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtDI); } else { impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } } else { impSpillSpecialSideEff(); if (clsFlags & CORINFO_FLG_ARRAY) { eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig); } typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass); tiRetVal.NormaliseForStack(); // The CEE_READONLY prefix modifies the verification semantics of an Address // operation on an array type. if ((clsFlags & CORINFO_FLG_ARRAY) && isReadonlyCall && tiRetVal.IsByRef()) { tiRetVal.SetIsReadonlyByRef(); } if (call->IsCall()) { // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call) GenTreeCall* origCall = call->AsCall(); const bool isFatPointerCandidate = origCall->IsFatPointerCandidate(); const bool isInlineCandidate = origCall->IsInlineCandidate(); const bool isGuardedDevirtualizationCandidate = origCall->IsGuardedDevirtualizationCandidate(); if (varTypeIsStruct(callRetTyp)) { // Need to treat all "split tree" cases here, not just inline candidates call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass); } // TODO: consider handling fatcalli cases this way too...? if (isInlineCandidate || isGuardedDevirtualizationCandidate) { // We should not have made any adjustments in impFixupCallStructReturn // as we defer those until we know the fate of the call. assert(call == origCall); assert(opts.OptEnabled(CLFLG_INLINING)); assert(!isFatPointerCandidate); // We should not try to inline calli. // Make the call its own tree (spill the stack if needed). // Do not consume the debug info here. This is particularly // important if we give up on the inline, in which case the // call will typically end up in the statement that contains // the GT_RET_EXPR that we leave on the stack. impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtDI, false); // TODO: Still using the widened type. GenTree* retExpr = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp), compCurBB->bbFlags); // Link the retExpr to the call so if necessary we can manipulate it later. origCall->gtInlineCandidateInfo->retExpr = retExpr; // Propagate retExpr as the placeholder for the call. call = retExpr; } else { // If the call is virtual, and has a generics context, and is not going to have a class probe, // record the context for possible use during late devirt. // // If we ever want to devirt at Tier0, and/or see issues where OSR methods under PGO lose // important devirtualizations, we'll want to allow both a class probe and a captured context. // if (origCall->IsVirtual() && (origCall->gtCallType != CT_INDIRECT) && (exactContextHnd != nullptr) && (origCall->gtClassProfileCandidateInfo == nullptr)) { JITDUMP("\nSaving context %p for call [%06u]\n", exactContextHnd, dspTreeID(origCall)); origCall->gtCallMoreFlags |= GTF_CALL_M_LATE_DEVIRT; LateDevirtualizationInfo* const info = new (this, CMK_Inlining) LateDevirtualizationInfo; info->exactContextHnd = exactContextHnd; origCall->gtLateDevirtualizationInfo = info; } if (isFatPointerCandidate) { // fatPointer candidates should be in statements of the form call() or var = call(). // Such form allows to find statements with fat calls without walking through whole trees // and removes problems with cutting trees. 
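// (If the calli result is not already a simple local, the code below assigns it to a fresh
// local and uses that local in its place, so the fat call ends up in its own
// call() / var = call() statement as described above.)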
assert(!bIntrinsicImported); assert(IsTargetAbi(CORINFO_CORERT_ABI)); if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn. { unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli")); LclVarDsc* varDsc = lvaGetDesc(calliSlot); varDsc->lvVerTypeInfo = tiRetVal; impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE); // impAssignTempGen can change src arg list and return type for call that returns struct. var_types type = genActualType(lvaTable[calliSlot].TypeGet()); call = gtNewLclvNode(calliSlot, type); } } // For non-candidates we must also spill, since we // might have locals live on the eval stack that this // call can modify. // // Suppress this for certain well-known call targets // that we know won't modify locals, eg calls that are // recognized in gtCanOptimizeTypeEquality. Otherwise // we may break key fragile pattern matches later on. bool spillStack = true; if (call->IsCall()) { GenTreeCall* callNode = call->AsCall(); if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) || gtIsTypeHandleToRuntimeTypeHandleHelper(callNode))) { spillStack = false; } else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0) { spillStack = false; } } if (spillStack) { impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call")); } } } if (!bIntrinsicImported) { //------------------------------------------------------------------------- // /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning. However, we need to normalize small type values returned by unmanaged functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here if we use the shorter inlined pinvoke stub. */ if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT)) { call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp); } } impPushOnStack(call, tiRetVal); } // VSD functions get a new call target each time we getCallInfo, so clear the cache. // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out. 
// if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB)) // callInfoCache.uncacheCallInfo(); return callRetTyp; } #ifdef _PREFAST_ #pragma warning(pop) #endif bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo, CorInfoCallConvExtension callConv) { CorInfoType corType = methInfo->args.retType; if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY)) { // We have some kind of STRUCT being returned structPassingKind howToReturnStruct = SPK_Unknown; var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, callConv, &howToReturnStruct); if (howToReturnStruct == SPK_ByReference) { return true; } } return false; } #ifdef DEBUG // var_types Compiler::impImportJitTestLabelMark(int numArgs) { TestLabelAndNum tlAndN; if (numArgs == 2) { tlAndN.m_num = 0; StackEntry se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); GenTree* val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue(); } else if (numArgs == 3) { StackEntry se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); GenTree* val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_num = val->AsIntConCommon()->IconValue(); se = impPopStack(); assert(se.seTypeInfo.GetType() == TI_INT); val = se.val; assert(val->IsCnsIntOrI()); tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue(); } else { assert(false); } StackEntry expSe = impPopStack(); GenTree* node = expSe.val; // There are a small number of special cases, where we actually put the annotation on a subnode. if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100) { // A loop hoist annotation with value >= 100 means that the expression should be a static field access, // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some // offset within the the static field block whose address is returned by the helper call. // The annotation is saying that this address calculation, but not the entire access, should be hoisted. assert(node->OperGet() == GT_IND); tlAndN.m_num -= 100; GetNodeTestData()->Set(node->AsOp()->gtOp1, tlAndN); GetNodeTestData()->Remove(node); } else { GetNodeTestData()->Set(node, tlAndN); } impPushOnStack(node, expSe.seTypeInfo); return node->TypeGet(); } #endif // DEBUG //----------------------------------------------------------------------------------- // impFixupCallStructReturn: For a call node that returns a struct do one of the following: // - set the flag to indicate struct return via retbuf arg; // - adjust the return type to a SIMD type if it is returned in 1 reg; // - spill call result into a temp if it is returned into 2 registers or more and not tail call or inline candidate. 
// // Arguments: // call - GT_CALL GenTree node // retClsHnd - Class handle of return type of the call // // Return Value: // Returns new GenTree node after fixing struct return of call node // GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd) { if (!varTypeIsStruct(call)) { return call; } call->gtRetClsHnd = retClsHnd; #if FEATURE_MULTIREG_RET call->InitializeStructReturnType(this, retClsHnd, call->GetUnmanagedCallConv()); const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc(); const unsigned retRegCount = retTypeDesc->GetReturnRegCount(); #else // !FEATURE_MULTIREG_RET const unsigned retRegCount = 1; #endif // !FEATURE_MULTIREG_RET structPassingKind howToReturnStruct; var_types returnType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct); if (howToReturnStruct == SPK_ByReference) { assert(returnType == TYP_UNKNOWN); call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG; return call; } // Recognize SIMD types as we do for LCL_VARs, // note it might not be the ABI-specific type, for example, on x64 we can set `TYP_SIMD8` // for `System.Numerics.Vector2` here but lower will change it to long as ABI dictates. var_types simdReturnType = impNormStructType(call->gtRetClsHnd); if (simdReturnType != call->TypeGet()) { assert(varTypeIsSIMD(simdReturnType)); JITDUMP("changing the type of a call [%06u] from %s to %s\n", dspTreeID(call), varTypeName(call->TypeGet()), varTypeName(simdReturnType)); call->ChangeType(simdReturnType); } if (retRegCount == 1) { return call; } #if FEATURE_MULTIREG_RET assert(varTypeIsStruct(call)); // It could be a SIMD returned in several regs. assert(returnType == TYP_STRUCT); assert((howToReturnStruct == SPK_ByValueAsHfa) || (howToReturnStruct == SPK_ByValue)); #ifdef UNIX_AMD64_ABI // must be a struct returned in two registers assert(retRegCount == 2); #else // not UNIX_AMD64_ABI assert(retRegCount >= 2); #endif // not UNIX_AMD64_ABI if (!call->CanTailCall() && !call->IsInlineCandidate()) { // Force a call returning multi-reg struct to be always of the IR form // tmp = call // // No need to assign a multi-reg struct to a local var if: // - It is a tail call or // - The call is marked for in-lining later return impAssignMultiRegTypeToVar(call, retClsHnd DEBUGARG(call->GetUnmanagedCallConv())); } return call; #endif // FEATURE_MULTIREG_RET } /***************************************************************************** For struct return values, re-type the operand in the case where the ABI does not use a struct return buffer */ //------------------------------------------------------------------------ // impFixupStructReturnType: For struct return values it sets appropriate flags in MULTIREG returns case; // in the non-multireg case it handles two special helpers: `CORINFO_HELP_GETFIELDSTRUCT`, `CORINFO_HELP_UNBOX_NULLABLE`. // // Arguments: // op - the return value; // retClsHnd - the struct handle; // unmgdCallConv - the calling convention of the function that returns this struct. // // Return Value: // the result tree that does the return.
// GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension unmgdCallConv) { assert(varTypeIsStruct(info.compRetType)); assert(info.compRetBuffArg == BAD_VAR_NUM); JITDUMP("\nimpFixupStructReturnType: retyping\n"); DISPTREE(op); #if defined(TARGET_XARCH) #if FEATURE_MULTIREG_RET // No VarArgs for CoreCLR on x64 Unix UNIX_AMD64_ABI_ONLY(assert(!info.compIsVarArgs)); // Is method returning a multi-reg struct? if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { // In case of multi-reg struct return, we force IR to be one of the following: // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp). if (op->gtOper == GT_LCL_VAR) { // Note that this is a multi-reg return. unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { return op; } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #else assert(info.compRetNativeType != TYP_STRUCT); #endif // defined(UNIX_AMD64_ABI) || defined(TARGET_X86) #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM) if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); // Make sure this struct type stays as struct so that we can return it as an HFA lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #elif FEATURE_MULTIREG_RET && defined(TARGET_ARM64) // Is method returning a multi-reg struct? if (IsMultiRegReturnedType(retClsHnd, unmgdCallConv)) { if (op->gtOper == GT_LCL_VAR) { // This LCL_VAR stays as a TYP_STRUCT unsigned lclNum = op->AsLclVarCommon()->GetLclNum(); if (!lvaIsImplicitByRefLocal(lclNum)) { // Make sure this struct type is not struct promoted lvaTable[lclNum].lvIsMultiRegRet = true; // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. op->gtFlags |= GTF_DONT_CSE; return op; } } if (op->gtOper == GT_CALL) { if (op->AsCall()->IsVarargs()) { // We cannot tail call because control needs to return to fixup the calling // convention for result return. op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL; op->AsCall()->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL; } else { return op; } } return impAssignMultiRegTypeToVar(op, retClsHnd DEBUGARG(unmgdCallConv)); } #endif // FEATURE_MULTIREG_RET && TARGET_ARM64 if (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this)) { // Don't retype `struct` as a primitive type in `ret` instruction. return op; } // This must be one of those 'special' helpers that don't // really have a return buffer, but instead use it as a way // to keep the trees cleaner with fewer address-taken temps. 
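// (Per the function header above, the helpers treated this way are CORINFO_HELP_GETFIELDSTRUCT
// and CORINFO_HELP_UNBOX_NULLABLE, detected via TreatAsHasRetBufArg.)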
// // Well now we have to materialize the the return buffer as // an address-taken temp. Then we can return the temp. // // NOTE: this code assumes that since the call directly // feeds the return, then the call must be returning the // same structure/class/type. // unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer")); // No need to spill anything as we're about to return. impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE); op = gtNewLclvNode(tmpNum, info.compRetType); JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n"); DISPTREE(op); return op; } /***************************************************************************** CEE_LEAVE may be jumping out of a protected block, viz, a catch or a finally-protected try. We find the finally blocks protecting the current offset (in order) by walking over the complete exception table and finding enclosing clauses. This assumes that the table is sorted. This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS. If we are leaving a catch handler, we need to attach the CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks. After this function, the BBJ_LEAVE block has been converted to a different type. */ #if !defined(FEATURE_EH_FUNCLETS) void Compiler::impImportLeave(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nBefore import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created) unsigned blkAddr = block->bbCodeOffs; BasicBlock* leaveTarget = block->bbJumpDest; unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary BasicBlock* step = DUMMY_INIT(NULL); unsigned encFinallies = 0; // Number of enclosing finallies. GenTree* endCatches = NULL; Statement* endLFinStmt = NULL; // The statement tree to indicate the end of locally-invoked finally. unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? * If so, we need to call CORINFO_HELP_ENDCATCH. 
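* Note that a leave out of a finally or fault handler is not legal IL; that case is rejected just below via BADCODE.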
*/ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) BADCODE("leave out of fault/finally block"); // Create the call to CORINFO_HELP_ENDCATCH GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID); // Make a list of all the currently pending endCatches if (endCatches) endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch); else endCatches = endCatch; #ifdef DEBUG if (verbose) { printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to " "CORINFO_HELP_ENDCATCH\n", block->bbNum, XTnum); } #endif } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* This is a finally-protected try we are jumping out of */ /* If there are any pending endCatches, and we have already jumped out of a finally-protected try, then the endCatches have to be put in a block in an outer try for async exceptions to work correctly. Else, just use append to the original block */ BasicBlock* callBlock; assert(!encFinallies == !endLFinStmt); // if we have finallies, we better have an endLFin tree, and vice-versa if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY " "block %s\n", callBlock->dspToString()); } #endif } else { assert(step != DUMMY_INIT(NULL)); /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); assert(step->bbJumpKind == BBJ_ALWAYS); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n", callBlock->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } // note that this sets BBF_IMPORTED on the block impEndTreeList(callBlock, endLFinStmt, lastStmt); } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n", step->dspToString()); } #endif unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel; assert(finallyNesting <= compHndBBtabCount); callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. 
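// Create a GT_END_LFIN marker recording this finally's nesting level. It is not appended to a block here:
// the statement is held in endLFinStmt and, on a later iteration (or in the finalStep block after the loop),
// it is placed at the head of the block that runs once this finally completes, along with any endCatches
// still pending at that point.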
GenTree* endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting); endLFinStmt = gtNewStmt(endLFin); endCatches = NULL; encFinallies++; invalidatePreds = true; } } /* Append any remaining endCatches, if any */ assert(!encFinallies == !endLFinStmt); if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS " "block %s\n", block->dspToString()); } #endif } else { // If leaveTarget is the start of another try block, we want to make sure that // we do not insert finalStep into that try block. Hence, we find the enclosing // try block. unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget); // Insert a new BB either in the try region indicated by tryIndex or // the handler region indicated by leaveTarget->bbHndIndex, // depending on which is the inner region. BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step); finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS; step->bbJumpDest = finalStep; /* The new block will inherit this block's weight */ finalStep->inheritWeight(block); #ifdef DEBUG if (verbose) { printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies, finalStep->dspToString()); } #endif Statement* lastStmt; if (endCatches) { lastStmt = gtNewStmt(endCatches); endLFinStmt->SetNextStmt(lastStmt); lastStmt->SetPrevStmt(endLFinStmt); } else { lastStmt = endLFinStmt; } impEndTreeList(finalStep, endLFinStmt, lastStmt); finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE // Queue up the jump target for importing impImportBlockPending(leaveTarget); invalidatePreds = true; } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #else // FEATURE_EH_FUNCLETS void Compiler::impImportLeave(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nBefore import CEE_LEAVE in " FMT_BB " (targetting " FMT_BB "):\n", block->bbNum, block->bbJumpDest->bbNum); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created) unsigned blkAddr = block->bbCodeOffs; BasicBlock* leaveTarget = block->bbJumpDest; unsigned jmpAddr = leaveTarget->bbCodeOffs; // LEAVE clears the stack, spill side effects, and set stack to 0 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; assert(block->bbJumpKind == BBJ_LEAVE); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary BasicBlock* step = nullptr; enum StepType { // No step type; step == NULL. ST_None, // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair? // That is, is step->bbJumpDest where a finally will return to? ST_FinallyReturn, // The step block is a catch return. ST_Catch, // The step block is in a "try", created as the target for a finally return or the target for a catch return. 
ST_Try }; StepType stepType = ST_None; unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Grab the handler offsets IL_OFFSET tryBeg = HBtab->ebdTryBegOffs(); IL_OFFSET tryEnd = HBtab->ebdTryEndOffs(); IL_OFFSET hndBeg = HBtab->ebdHndBegOffs(); IL_OFFSET hndEnd = HBtab->ebdHndEndOffs(); /* Is this a catch-handler we are CEE_LEAVEing out of? */ if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd)) { // Can't CEE_LEAVE out of a finally/fault handler if (HBtab->HasFinallyOrFaultHandler()) { BADCODE("leave out of fault/finally block"); } /* We are jumping out of a catch */ if (step == nullptr) { step = block; step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET stepType = ST_Catch; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB " to BBJ_EHCATCHRET " "block\n", XTnum, step->bbNum); } #endif } else { BasicBlock* exitBlock; /* Create a new catch exit block in the catch region for the existing step block to jump to in this * scope */ exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step); assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch // exit) returns to this block step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ exitBlock->inheritWeight(block); exitBlock->bbFlags |= BBF_IMPORTED; /* This exit block is the new step */ step = exitBlock; stepType = ST_Catch; invalidatePreds = true; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n", XTnum, exitBlock->bbNum); } #endif } } else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { /* We are jumping out of a finally-protected try */ BasicBlock* callBlock; if (step == nullptr) { #if FEATURE_EH_CALLFINALLY_THUNKS // Put the call to the finally in the enclosing region. unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1; callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block); // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. 
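// The resulting shape is roughly: block (BBJ_ALWAYS) -> callBlock (BBJ_CALLFINALLY, placed in the enclosing
// EH region) -> finally handler, with a BBJ_ALWAYS step block (created further below) receiving control
// when the finally returns.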
block->bbJumpKind = BBJ_ALWAYS; block->bbJumpDest = callBlock; block->bbJumpDest->bbRefs++; /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n", XTnum, block->bbNum, callBlock->bbNum); } #endif #else // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = block; callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB " to " "BBJ_CALLFINALLY block\n", XTnum, callBlock->bbNum); } #endif #endif // !FEATURE_EH_CALLFINALLY_THUNKS } else { // Calling the finally block. We already have a step block that is either the call-to-finally from a // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by // a 'finally'), or the step block is the return from a catch. // // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will // automatically re-raise the exception, using the return address of the catch (that is, the target // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64, // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly // within the 'try' region protected by the finally, since we generate code in such a way that execution // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on // stack walks.) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS if (step->bbJumpKind == BBJ_EHCATCHRET) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = step2; step->bbJumpDest->bbRefs++; step2->inheritWeight(block); step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is " "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n", XTnum, step->bbNum, step2->bbNum); } #endif step = step2; assert(stepType == ST_Catch); // Leave it as catch type for now. } #endif // FEATURE_EH_CALLFINALLY_THUNKS #if FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1; unsigned callFinallyHndIndex = (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 
0 : HBtab->ebdEnclosingHndIndex + 1; #else // !FEATURE_EH_CALLFINALLY_THUNKS unsigned callFinallyTryIndex = XTnum + 1; unsigned callFinallyHndIndex = 0; // don't care #endif // !FEATURE_EH_CALLFINALLY_THUNKS callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step); step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next // finally in the chain) step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ callBlock->inheritWeight(block); callBlock->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY " "block " FMT_BB "\n", XTnum, callBlock->bbNum); } #endif } step = fgNewBBafter(BBJ_ALWAYS, callBlock, true); stepType = ST_FinallyReturn; /* The new block will inherit this block's weight */ step->inheritWeight(block); step->bbFlags |= BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS; #ifdef DEBUG if (verbose) { printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) " "block " FMT_BB "\n", XTnum, step->bbNum); } #endif callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler. invalidatePreds = true; } else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) && !jitIsBetween(jmpAddr, tryBeg, tryEnd)) { // We are jumping out of a catch-protected try. // // If we are returning from a call to a finally, then we must have a step block within a try // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the // finally raises an exception), the VM will find this step block, notice that it is in a protected region, // and invoke the appropriate catch. // // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception), // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM, // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target // address of the catch return as the new exception address. That is, the re-raised exception appears to // occur at the catch return address. If this exception return address skips an enclosing try/catch that // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should. // For example: // // try { // try { // // something here raises ThreadAbortException // LEAVE LABEL_1; // no need to stop at LABEL_2 // } catch (Exception) { // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode. // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only // // need to do this transformation if the current EH block is a try/catch that catches // // ThreadAbortException (or one of its parents), however we might not be able to find that // // information, so currently we do it for all catch types. 
// LEAVE LABEL_1; // Convert this to LEAVE LABEL2; // } // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code // } catch (ThreadAbortException) { // } // LABEL_1: // // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# // compiler. if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch)) { BasicBlock* catchStep; assert(step); if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); } else { assert(stepType == ST_Catch); assert(step->bbJumpKind == BBJ_EHCATCHRET); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step); step->bbJumpDest = catchStep; step->bbJumpDest->bbRefs++; #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /* The new block will inherit this block's weight */ catchStep->inheritWeight(block); catchStep->bbFlags |= BBF_IMPORTED; #ifdef DEBUG if (verbose) { if (stepType == ST_FinallyReturn) { printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } else { assert(stepType == ST_Catch); printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new " "BBJ_ALWAYS block " FMT_BB "\n", XTnum, catchStep->bbNum); } } #endif // DEBUG /* This block is the new step */ step = catchStep; stepType = ST_Try; invalidatePreds = true; } } } if (step == nullptr) { block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) { printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE " "block " FMT_BB " to BBJ_ALWAYS\n", block->bbNum); } #endif } else { step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { assert(step->bbJumpKind == BBJ_ALWAYS); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) #ifdef DEBUG if (verbose) { printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum); } #endif // Queue up the jump target for importing impImportBlockPending(leaveTarget); } if (invalidatePreds && fgComputePredsDone) { JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n"); fgRemovePreds(); } #ifdef DEBUG fgVerifyHandlerTab(); if (verbose) { printf("\nAfter import CEE_LEAVE:\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } #endif // FEATURE_EH_FUNCLETS /*****************************************************************************/ // This is called when reimporting a leave block. It resets the JumpKind, // JumpDest, and bbNext to the original values void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) { #if defined(FEATURE_EH_FUNCLETS) // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1) // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0, // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we // create another BBJ_ALWAYS (call it B2). 
In this process B1 gets orphaned and any blocks to which B1 is the // only predecessor are also considered orphans, and we attempt to delete them. // // try { // .... // try // { // .... // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1 // } finally { } // } finally { } // OUTSIDE: // // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block // that a finally would branch to (and such a block is marked as a finally target). Block B1 branches to the step block. // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as a pair and handled correctly. if (block->bbJumpKind == BBJ_CALLFINALLY) { BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind); dupBlock->bbFlags = block->bbFlags; dupBlock->bbJumpDest = block->bbJumpDest; dupBlock->copyEHRegion(block); dupBlock->bbCatchTyp = block->bbCatchTyp; // Mark this block as // a) not referenced by any other block to make sure that it gets deleted // b) weight zero // c) prevent from being imported // d) as internal // e) as rarely run dupBlock->bbRefs = 0; dupBlock->bbWeight = BB_ZERO_WEIGHT; dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY; // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS // will be next to each other. fgInsertBBafter(block, dupBlock); #ifdef DEBUG if (verbose) { printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum); } #endif } #endif // FEATURE_EH_FUNCLETS block->bbJumpKind = BBJ_LEAVE; fgInitBBLookup(); block->bbJumpDest = fgLookupBB(jmpAddr); // We will leave the BBJ_ALWAYS block we introduced. When it's reimported // the BBJ_ALWAYS block will be unreachable, and will be removed after. The // reason we don't want to remove the block at this point is that if we call // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be // added and the linked list length will be different than fgBBcount. } /*****************************************************************************/ // Get the first non-prefix opcode. Used for verification of valid combinations // of prefixes and actual opcodes. OPCODE Compiler::impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp) { while (codeAddr < codeEndp) { OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); if (opcode == CEE_PREFIX1) { if (codeAddr >= codeEndp) { break; } opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); codeAddr += sizeof(__int8); } switch (opcode) { case CEE_UNALIGNED: case CEE_VOLATILE: case CEE_TAILCALL: case CEE_CONSTRAINED: case CEE_READONLY: break; default: return opcode; } codeAddr += opcodeSizes[opcode]; } return CEE_ILLEGAL; } /*****************************************************************************/ // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes void Compiler::impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix) { OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!( // The opcodes for all the ldind and stind variants happen to be contiguous, except for stind.i.
((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) || (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) || (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) || // volatile. prefix is allowed with the ldsfld and stsfld (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD))))) { BADCODE("Invalid opcode for unaligned. or volatile. prefix"); } } /*****************************************************************************/ #ifdef DEBUG #undef RETURN // undef contracts RETURN macro enum controlFlow_t { NEXT, CALL, RETURN, THROW, BRANCH, COND_BRANCH, BREAK, PHI, META, }; const static controlFlow_t controlFlow[] = { #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow, #include "opcode.def" #undef OPDEF }; #endif // DEBUG /***************************************************************************** * Determine the result type of an arithemetic operation * On 64-bit inserts upcasts when native int is mixed with int32 */ var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2) { var_types type = TYP_UNDEF; GenTree* op1 = *pOp1; GenTree* op2 = *pOp2; // Arithemetic operations are generally only allowed with // primitive types, but certain operations are allowed // with byrefs if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF)) { if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF)) { // byref1-byref2 => gives a native int type = TYP_I_IMPL; } else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF)) { // [native] int - byref => gives a native int // // The reason is that it is possible, in managed C++, // to have a tree like this: // // - // / \. // / \. // / \. // / \. // const(h) int addr byref // // <BUGNUM> VSW 318822 </BUGNUM> // // So here we decide to make the resulting type to be a native int. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef TARGET_64BIT if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_I_IMPL; } else { // byref - [native] int => gives a byref assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet())); #ifdef TARGET_64BIT if ((genActualType(op2->TypeGet()) != TYP_I_IMPL)) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } } else if ((oper == GT_ADD) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF)) { // byref + [native] int => gives a byref // (or) // [native] int + byref => gives a byref // only one can be a byref : byref op byref not allowed assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet())); #ifdef TARGET_64BIT if (genActualType(op2->TypeGet()) == TYP_BYREF) { if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? 
TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT type = TYP_BYREF; } #ifdef TARGET_64BIT else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long // we get this because in the IL the long isn't Int64, it's just IntPtr if (genActualType(op1->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } else if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { // insert an explicit upcast op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL); } type = TYP_I_IMPL; } #else // 32-bit TARGET else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG) { assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType)); // int + long => gives long // long + int => gives long type = TYP_LONG; } #endif // TARGET_64BIT else { // int + int => gives an int assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF); assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); type = genActualType(op1->gtType); // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT. // Otherwise, turn floats into doubles if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT)) { assert(genActualType(op2->gtType) == TYP_DOUBLE); type = TYP_DOUBLE; } } assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT); return type; } //------------------------------------------------------------------------ // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting // // Arguments: // op1 - value to cast // pResolvedToken - resolved token for type to cast to // isCastClass - true if this is a castclass, false if isinst // // Return Value: // tree representing optimized cast, or null if no optimization possible GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass) { assert(op1->TypeGet() == TYP_REF); // Don't optimize for minopts or debug codegen. if (opts.OptimizationDisabled()) { return nullptr; } // See what we know about the type of the object being cast. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull); if (fromClass != nullptr) { CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass; JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst", isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass), info.compCompHnd->getClassName(toClass)); // Perhaps we know if the cast will succeed or fail. TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass); if (castResult == TypeCompareState::Must) { // Cast will succeed, result is simply op1. JITDUMP("Cast will succeed, optimizing to simply return input\n"); return op1; } else if (castResult == TypeCompareState::MustNot) { // See if we can sharpen exactness by looking for final classes if (!isExact) { isExact = impIsClassExact(fromClass); } // Cast to exact type will fail. 
Handle case where we have // an exact type (that is, fromClass is not a subtype) // and we're not going to throw on failure. if (isExact && !isCastClass) { JITDUMP("Cast will fail, optimizing to return null\n"); GenTree* result = gtNewIconNode(0, TYP_REF); // If the cast was fed by a box, we can remove that too. if (op1->IsBoxedValue()) { JITDUMP("Also removing upstream box\n"); gtTryRemoveBoxUpstreamEffects(op1); } return result; } else if (isExact) { JITDUMP("Not optimizing failing castclass (yet)\n"); } else { JITDUMP("Can't optimize since fromClass is inexact\n"); } } else { JITDUMP("Result of cast unknown, must generate runtime test\n"); } } else { JITDUMP("\nCan't optimize since fromClass is unknown\n"); } return nullptr; } //------------------------------------------------------------------------ // impCastClassOrIsInstToTree: build and import castclass/isinst // // Arguments: // op1 - value to cast // op2 - type handle for type to cast to // pResolvedToken - resolved token from the cast operation // isCastClass - true if this is castclass, false means isinst // // Return Value: // Tree representing the cast // // Notes: // May expand into a series of runtime checks or a helper call. GenTree* Compiler::impCastClassOrIsInstToTree( GenTree* op1, GenTree* op2, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass, IL_OFFSET ilOffset) { assert(op1->TypeGet() == TYP_REF); // Optimistically assume the jit should expand this as an inline test bool shouldExpandInline = true; // Profitability check. // // Don't bother with inline expansion when jit is trying to // generate code quickly, or the cast is in code that won't run very // often, or the method already is pretty big. if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { // not worth the code expansion if jitting fast or in a rarely run block shouldExpandInline = false; } else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals()) { // not worth creating an untracked local variable shouldExpandInline = false; } else if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && (JitConfig.JitCastProfiling() == 1)) { // Optimizations are enabled but we're still instrumenting (including casts) if (isCastClass && !impIsClassExact(pResolvedToken->hClass)) { // Usually, we make a speculative assumption that it makes sense to expand castclass // even for non-sealed classes, but let's rely on PGO in this specific case shouldExpandInline = false; } } // Pessimistically assume the jit cannot expand this as an inline test bool canExpandInline = false; const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass); // Legality check. // // Not all classclass/isinst operations can be inline expanded. // Check legality only if an inline expansion is desirable. if (shouldExpandInline) { if (isCastClass) { // Jit can only inline expand the normal CHKCASTCLASS helper. canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS); } else { if (helper == CORINFO_HELP_ISINSTANCEOFCLASS) { // If the class is exact, the jit can expand the IsInst check inline. canExpandInline = impIsClassExact(pResolvedToken->hClass); } } } const bool expandInline = canExpandInline && shouldExpandInline; if (!expandInline) { JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst", canExpandInline ? 
"want smaller code or faster jitting" : "inline expansion not legal"); // If we CSE this class handle we prevent assertionProp from making SubType assertions // so instead we force the CSE logic to not consider CSE-ing this class handle. // op2->gtFlags |= GTF_DONT_CSE; GenTreeCall* call = gtNewHelperCallNode(helper, TYP_REF, gtNewCallArgs(op2, op1)); if (impIsCastHelperEligibleForClassProbe(call) && !impIsClassExact(pResolvedToken->hClass)) { ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return call; } JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst"); impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2")); GenTree* temp; GenTree* condMT; // // expand the methodtable match: // // condMT ==> GT_NE // / \. // GT_IND op2 (typically CNS_INT) // | // op1Copy // // This can replace op1 with a GT_COMMA that evaluates op1 into a local // op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1")); // // op1 is now known to be a non-complex tree // thus we can use gtClone(op1) from now on // GenTree* op2Var = op2; if (isCastClass) { op2Var = fgInsertCommaFormTemp(&op2); lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true; } temp = gtNewMethodTableLookup(temp); condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2); GenTree* condNull; // // expand the null check: // // condNull ==> GT_EQ // / \. // op1Copy CNS_INT // null // condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF)); // // expand the true and false trees for the condMT // GenTree* condFalse = gtClone(op1); GenTree* condTrue; if (isCastClass) { // // use the special helper that skips the cases checked by our inlined cast // const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL; condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewCallArgs(op2Var, gtClone(op1))); } else { condTrue = gtNewIconNode(0, TYP_REF); } GenTree* qmarkMT; // // Generate first QMARK - COLON tree // // qmarkMT ==> GT_QMARK // / \. // condMT GT_COLON // / \. // condFalse condTrue // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse); qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp->AsColon()); if (isCastClass && impIsClassExact(pResolvedToken->hClass) && condTrue->OperIs(GT_CALL)) { // condTrue is used only for throwing InvalidCastException in case of casting to an exact class. condTrue->AsCall()->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN; } GenTree* qmarkNull; // // Generate second QMARK - COLON tree // // qmarkNull ==> GT_QMARK // / \. // condNull GT_COLON // / \. // qmarkMT op1Copy // temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT); qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp->AsColon()); qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF; // Make QMark node a top level node by spilling it. unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2")); impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE); // TODO-CQ: Is it possible op1 has a better type? // // See also gtGetHelperCallClassHandle where we make the same // determination for the helper call variants. 
LclVarDsc* lclDsc = lvaGetDesc(tmp); assert(lclDsc->lvSingleDef == 0); lclDsc->lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); lvaSetClass(tmp, pResolvedToken->hClass); return gtNewLclvNode(tmp, TYP_REF); } #ifndef DEBUG #define assertImp(cond) ((void)0) #else #define assertImp(cond) \ do \ { \ if (!(cond)) \ { \ const int cchAssertImpBuf = 600; \ char* assertImpBuf = (char*)_alloca(cchAssertImpBuf); \ _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \ "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \ impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \ op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \ assertAbort(assertImpBuf, __FILE__, __LINE__); \ } \ } while (0) #endif // DEBUG //------------------------------------------------------------------------ // impBlockIsInALoop: check if a block might be in a loop // // Arguments: // block - block to check // // Returns: // true if the block might be in a loop. // // Notes: // Conservatively correct; may return true for some blocks that are // not actually in loops. // bool Compiler::impBlockIsInALoop(BasicBlock* block) { return (compIsForInlining() && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0)) || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0); } #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif /***************************************************************************** * Import the instr for the given basic block */ void Compiler::impImportBlockCode(BasicBlock* block) { #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind) #ifdef DEBUG if (verbose) { printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName); } #endif unsigned nxtStmtIndex = impInitBlockLineInfo(); IL_OFFSET nxtStmtOffs; CorInfoHelpFunc helper; CorInfoIsAccessAllowedResult accessAllowedResult; CORINFO_HELPER_DESC calloutHelper; const BYTE* lastLoadToken = nullptr; /* Get the tree list started */ impBeginTreeList(); #ifdef FEATURE_ON_STACK_REPLACEMENT bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0); #ifdef DEBUG // Optionally suppress patchpoints by method hash // static ConfigMethodRange JitEnablePatchpointRange; JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange()); const unsigned hash = impInlineRoot()->info.compMethodHash(); const bool inRange = JitEnablePatchpointRange.Contains(hash); enablePatchpoints &= inRange; #endif // DEBUG if (enablePatchpoints) { // We don't inline at Tier0, if we do, we may need rethink our approach. // Could probably support inlines that don't introduce flow. // assert(!compIsForInlining()); // OSR is not yet supported for methods with explicit tail calls. // // But we also do not have to switch these methods to be optimized, as we should be // able to avoid getting trapped in Tier0 code by normal call counting. // So instead, just suppress adding patchpoints. // if (!compTailPrefixSeen) { // We only need to add patchpoints if the method can loop. // if (compHasBackwardJump) { assert(compCanHavePatchpoints()); // By default we use the "adaptive" strategy. // // This can create both source and target patchpoints within a given // loop structure, which isn't ideal, but is not incorrect. We will // just have some extra Tier0 overhead. 
// // Todo: implement support for mid-block patchpoints. If `block` // is truly a backedge source (and not in a handler) then we should be // able to find a stack empty point somewhere in the block. // const int patchpointStrategy = JitConfig.TC_PatchpointStrategy(); bool addPatchpoint = false; bool mustUseTargetPatchpoint = false; switch (patchpointStrategy) { default: { // Patchpoints at backedge sources, if possible, otherwise targets. // addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE); mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex(); break; } case 1: { // Patchpoints at stackempty backedge targets. // Note if we have loops where the IL stack is not empty on the backedge we can't patchpoint // them. // // We should not have allowed OSR if there were backedges in handlers. // assert(!block->hasHndIndex()); addPatchpoint = ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) && (verCurrentState.esStackDepth == 0); break; } case 2: { // Adaptive strategy. // // Patchpoints at backedge targets if there are multiple backedges, // otherwise at backedge sources, if possible. Note a block can be both; if so we // just need one patchpoint. // if ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET) { // We don't know backedge count, so just use ref count. // addPatchpoint = (block->bbRefs > 1) && (verCurrentState.esStackDepth == 0); } if (!addPatchpoint && ((block->bbFlags & BBF_BACKWARD_JUMP_SOURCE) == BBF_BACKWARD_JUMP_SOURCE)) { addPatchpoint = true; mustUseTargetPatchpoint = (verCurrentState.esStackDepth != 0) || block->hasHndIndex(); // Also force target patchpoint if target block has multiple (backedge) preds. // if (!mustUseTargetPatchpoint) { for (BasicBlock* const succBlock : block->Succs(this)) { if ((succBlock->bbNum <= block->bbNum) && (succBlock->bbRefs > 1)) { mustUseTargetPatchpoint = true; break; } } } } break; } } if (addPatchpoint) { if (mustUseTargetPatchpoint) { // We wanted a source patchpoint, but could not have one. // So, add patchpoints to the backedge targets. // for (BasicBlock* const succBlock : block->Succs(this)) { if (succBlock->bbNum <= block->bbNum) { // The succBlock had better agree it's a target. // assert((succBlock->bbFlags & BBF_BACKWARD_JUMP_TARGET) == BBF_BACKWARD_JUMP_TARGET); // We may already have decided to put a patchpoint in succBlock. If not, add one. // if ((succBlock->bbFlags & BBF_PATCHPOINT) != 0) { // In some cases the target may not be stack-empty at entry. // If so, we will bypass patchpoints for this backedge. // if (succBlock->bbStackDepthOnEntry() > 0) { JITDUMP("\nCan't set source patchpoint at " FMT_BB ", can't use target " FMT_BB " as it has non-empty stack on entry.\n", block->bbNum, succBlock->bbNum); } else { JITDUMP("\nCan't set source patchpoint at " FMT_BB ", using target " FMT_BB " instead\n", block->bbNum, succBlock->bbNum); assert(!succBlock->hasHndIndex()); succBlock->bbFlags |= BBF_PATCHPOINT; } } } } } else { assert(!block->hasHndIndex()); block->bbFlags |= BBF_PATCHPOINT; } setMethodHasPatchpoint(); } } else { // Should not see backward branch targets w/o backwards branches. // So if !compHasBackwardsBranch, these flags should never be set. // assert((block->bbFlags & (BBF_BACKWARD_JUMP_TARGET | BBF_BACKWARD_JUMP_SOURCE)) == 0); } } #ifdef DEBUG // As a stress test, we can place patchpoints at the start of any block // that is a stack empty point and is not within a handler. 
// // Todo: enable for mid-block stack empty points too. // const int offsetOSR = JitConfig.JitOffsetOnStackReplacement(); const int randomOSR = JitConfig.JitRandomOnStackReplacement(); const bool tryOffsetOSR = offsetOSR >= 0; const bool tryRandomOSR = randomOSR > 0; if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0)) { // Block start can have a patchpoint. See if we should add one. // bool addPatchpoint = false; // Specific offset? // if (tryOffsetOSR) { if (impCurOpcOffs == (unsigned)offsetOSR) { addPatchpoint = true; } } // Random? // else { // Reuse the random inliner's random state. // Note m_inlineStrategy is always created, even if we're not inlining. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(randomOSR); const int randomValue = (int)random->Next(100); addPatchpoint = (randomValue < randomOSR); } if (addPatchpoint) { block->bbFlags |= BBF_PATCHPOINT; setMethodHasPatchpoint(); } JITDUMP("\n** %s patchpoint%s added to " FMT_BB " (il offset %u)\n", tryOffsetOSR ? "offset" : "random", addPatchpoint ? "" : " not", block->bbNum, impCurOpcOffs); } #endif // DEBUG } // Mark stack-empty rare blocks to be considered for partial compilation. // // Ideally these are conditionally executed blocks -- if the method is going // to unconditionally throw, there's not as much to be gained by deferring jitting. // For now, we just screen out the entry bb. // // In general we might want track all the IL stack empty points so we can // propagate rareness back through flow and place the partial compilation patchpoints "earlier" // so there are fewer overall. // // Note unlike OSR, it's ok to forgo these. // // Todo: stress mode... // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) && compCanHavePatchpoints() && !compTailPrefixSeen) { // Is this block a good place for partial compilation? // if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) && ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex()) { JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum); block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT; setMethodHasPartialCompilationPatchpoint(); // Change block to BBJ_THROW so we won't trigger importation of successors. // block->bbJumpKind = BBJ_THROW; // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. 
// if (info.compMethodInfo->options & (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE)) { lvaGenericsContextInUse = true; } return; } } #endif // FEATURE_ON_STACK_REPLACEMENT /* Walk the opcodes that comprise the basic block */ const BYTE* codeAddr = info.compCode + block->bbCodeOffs; const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd; IL_OFFSET opcodeOffs = block->bbCodeOffs; IL_OFFSET lastSpillOffs = opcodeOffs; signed jmpDist; /* remember the start of the delegate creation sequence (used for verification) */ const BYTE* delegateCreateStart = nullptr; int prefixFlags = 0; bool explicitTailCall, constraintCall, readonlyCall; typeInfo tiRetVal; unsigned numArgs = info.compArgsCount; /* Now process all the opcodes in the block */ var_types callTyp = TYP_COUNT; OPCODE prevOpcode = CEE_ILLEGAL; if (block->bbCatchTyp) { if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) { impCurStmtOffsSet(block->bbCodeOffs); } // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block // to a temp. This is a trade off for code simplicity impSpillSpecialSideEff(); } while (codeAddr < codeEndp) { #ifdef FEATURE_READYTORUN bool usingReadyToRunHelper = false; #endif CORINFO_RESOLVED_TOKEN resolvedToken; CORINFO_RESOLVED_TOKEN constrainedResolvedToken; CORINFO_CALL_INFO callInfo; CORINFO_FIELD_INFO fieldInfo; tiRetVal = typeInfo(); // Default type info //--------------------------------------------------------------------- /* We need to restrict the max tree depth as many of the Compiler functions are recursive. We do this by spilling the stack */ if (verCurrentState.esStackDepth) { /* Has it been a while since we last saw a non-empty stack (which guarantees that the tree depth isnt accumulating. */ if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode)) { impSpillStackEnsure(); lastSpillOffs = opcodeOffs; } } else { lastSpillOffs = opcodeOffs; impBoxTempInUse = false; // nothing on the stack, box temp OK to use again } /* Compute the current instr offset */ opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); #ifndef DEBUG if (opts.compDbgInfo) #endif { nxtStmtOffs = (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET; /* Have we reached the next stmt boundary ? */ if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs) { assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]); if (verCurrentState.esStackDepth != 0 && opts.compDbgCode) { /* We need to provide accurate IP-mapping at this point. So spill anything on the stack so that it will form gtStmts with the correct stmt offset noted */ impSpillStackEnsure(true); } // Have we reported debug info for any tree? if (impCurStmtDI.IsValid() && opts.compDbgCode) { GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); assert(!impCurStmtDI.IsValid()); } if (!impCurStmtDI.IsValid()) { /* Make sure that nxtStmtIndex is in sync with opcodeOffs. If opcodeOffs has gone past nxtStmtIndex, catch up */ while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount && info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs) { nxtStmtIndex++; } /* Go to the new stmt */ impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]); /* Update the stmt boundary index */ nxtStmtIndex++; assert(nxtStmtIndex <= info.compStmtOffsetsCount); /* Are there any more line# entries after this one? 
*/ if (nxtStmtIndex < info.compStmtOffsetsCount) { /* Remember where the next line# starts */ nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex]; } else { /* No more line# entries */ nxtStmtOffs = BAD_IL_OFFSET; } } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) && (verCurrentState.esStackDepth == 0)) { /* At stack-empty locations, we have already added the tree to the stmt list with the last offset. We just need to update impCurStmtDI */ impCurStmtOffsSet(opcodeOffs); } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) && impOpcodeIsCallSiteBoundary(prevOpcode)) { /* Make sure we have a type cached */ assert(callTyp != TYP_COUNT); if (callTyp == TYP_VOID) { impCurStmtOffsSet(opcodeOffs); } else if (opts.compDbgCode) { impSpillStackEnsure(true); impCurStmtOffsSet(opcodeOffs); } } else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP)) { if (opts.compDbgCode) { impSpillStackEnsure(true); } impCurStmtOffsSet(opcodeOffs); } assert(!impCurStmtDI.IsValid() || (nxtStmtOffs == BAD_IL_OFFSET) || (impCurStmtDI.GetLocation().GetOffset() <= nxtStmtOffs)); } CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL); CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL); var_types lclTyp, ovflType = TYP_UNKNOWN; GenTree* op1 = DUMMY_INIT(NULL); GenTree* op2 = DUMMY_INIT(NULL); GenTree* newObjThisPtr = DUMMY_INIT(NULL); bool uns = DUMMY_INIT(false); bool isLocal = false; /* Get the next opcode and the size of its parameters */ OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr); codeAddr += sizeof(__int8); #ifdef DEBUG impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs); #endif DECODE_OPCODE: // Return if any previous code has caused inline to fail. if (compDonotInline()) { return; } /* Get the size of additional parameters */ signed int sz = opcodeSizes[opcode]; #ifdef DEBUG clsHnd = NO_CLASS_HANDLE; lclTyp = TYP_COUNT; callTyp = TYP_COUNT; impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1); impCurOpcName = opcodeNames[opcode]; if (verbose && (opcode != CEE_PREFIX1)) { printf("%s", impCurOpcName); } /* Use assertImp() to display the opcode */ op1 = op2 = nullptr; #endif /* See what kind of an opcode we have, then */ unsigned mflags = 0; unsigned clsFlags = 0; switch (opcode) { unsigned lclNum; var_types type; GenTree* op3; genTreeOps oper; unsigned size; int val; CORINFO_SIG_INFO sig; IL_OFFSET jmpAddr; bool ovfl, unordered, callNode; bool ldstruct; CORINFO_CLASS_HANDLE tokenType; union { int intVal; float fltVal; __int64 lngVal; double dblVal; } cval; case CEE_PREFIX1: opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; SPILL_APPEND: // We need to call impSpillLclRefs() for a struct type lclVar. // This is because there may be loads of that lclVar on the evaluation stack, and // we need to ensure that those loads are completed before we modify it. 
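// The assignment's LHS may be a direct LCL_VAR, or a block store through an address that points into
// a local (ADDR(LCL_VAR), ADD(ADDR(LCL_VAR), CNS_INT), or ADDR(FIELD(... ADDR(LCL_VAR)))); all of these
// shapes are recognized below so that indirect updates of a struct local also force the spill.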
if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtGetOp1())) { GenTree* lhs = op1->gtGetOp1(); GenTreeLclVarCommon* lclVar = nullptr; if (lhs->gtOper == GT_LCL_VAR) { lclVar = lhs->AsLclVarCommon(); } else if (lhs->OperIsBlk()) { // Check if LHS address is within some struct local, to catch // cases where we're updating the struct by something other than a stfld GenTree* addr = lhs->AsBlk()->Addr(); // Catches ADDR(LCL_VAR), or ADD(ADDR(LCL_VAR),CNS_INT)) lclVar = addr->IsLocalAddrExpr(); // Catches ADDR(FIELD(... ADDR(LCL_VAR))) if (lclVar == nullptr) { GenTree* lclTree = nullptr; if (impIsAddressInLocal(addr, &lclTree)) { lclVar = lclTree->AsLclVarCommon(); } } } if (lclVar != nullptr) { impSpillLclRefs(lclVar->GetLclNum()); } } /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); goto DONE_APPEND; APPEND: /* Append 'op1' to the list of statements */ impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); goto DONE_APPEND; DONE_APPEND: #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif break; case CEE_LDNULL: impPushNullObjRefOnStack(); break; case CEE_LDC_I4_M1: case CEE_LDC_I4_0: case CEE_LDC_I4_1: case CEE_LDC_I4_2: case CEE_LDC_I4_3: case CEE_LDC_I4_4: case CEE_LDC_I4_5: case CEE_LDC_I4_6: case CEE_LDC_I4_7: case CEE_LDC_I4_8: cval.intVal = (opcode - CEE_LDC_I4_0); assert(-1 <= cval.intVal && cval.intVal <= 8); goto PUSH_I4CON; case CEE_LDC_I4_S: cval.intVal = getI1LittleEndian(codeAddr); goto PUSH_I4CON; case CEE_LDC_I4: cval.intVal = getI4LittleEndian(codeAddr); goto PUSH_I4CON; PUSH_I4CON: JITDUMP(" %d", cval.intVal); impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT)); break; case CEE_LDC_I8: cval.lngVal = getI8LittleEndian(codeAddr); JITDUMP(" 0x%016llx", cval.lngVal); impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG)); break; case CEE_LDC_R8: cval.dblVal = getR8LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE)); break; case CEE_LDC_R4: cval.dblVal = getR4LittleEndian(codeAddr); JITDUMP(" %#.17g", cval.dblVal); impPushOnStack(gtNewDconNode(cval.dblVal, TYP_FLOAT), typeInfo(TI_DOUBLE)); break; case CEE_LDSTR: val = getU4LittleEndian(codeAddr); JITDUMP(" %08X", val); impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal); break; case CEE_LDARG: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDARG_0: case CEE_LDARG_1: case CEE_LDARG_2: case CEE_LDARG_3: lclNum = (opcode - CEE_LDARG_0); assert(lclNum >= 0 && lclNum < 4); impLoadArg(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC: lclNum = getU2LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_S: lclNum = getU1LittleEndian(codeAddr); JITDUMP(" %u", lclNum); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_LDLOC_0: case CEE_LDLOC_1: case CEE_LDLOC_2: case CEE_LDLOC_3: lclNum = (opcode - CEE_LDLOC_0); assert(lclNum >= 0 && lclNum < 4); impLoadLoc(lclNum, opcodeOffs + sz + 1); break; case CEE_STARG: lclNum = getU2LittleEndian(codeAddr); goto STARG; case CEE_STARG_S: lclNum = getU1LittleEndian(codeAddr); STARG: JITDUMP(" %u", lclNum); if (compIsForInlining()) { op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); 
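// For an inlinee the IL argument is fetched as a local here (either the caller's lclVar or a temp
// created for the argument); the store below is then redirected to that local via VAR_ST_VALID.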
noway_assert(op1->gtOper == GT_LCL_VAR); lclNum = op1->AsLclVar()->GetLclNum(); goto VAR_ST_VALID; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } // We should have seen this arg write in the prescan assert(lvaTable[lclNum].lvHasILStoreOp); goto VAR_ST; case CEE_STLOC: lclNum = getU2LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_S: lclNum = getU1LittleEndian(codeAddr); isLocal = true; JITDUMP(" %u", lclNum); goto LOC_ST; case CEE_STLOC_0: case CEE_STLOC_1: case CEE_STLOC_2: case CEE_STLOC_3: isLocal = true; lclNum = (opcode - CEE_STLOC_0); assert(lclNum >= 0 && lclNum < 4); LOC_ST: if (compIsForInlining()) { lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp")); goto _PopValue; } lclNum += numArgs; VAR_ST: if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var) { BADCODE("Bad IL"); } VAR_ST_VALID: /* if it is a struct assignment, make certain we don't overflow the buffer */ assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd)); if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } _PopValue: /* Pop the value being assigned */ { StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; tiRetVal = se.seTypeInfo; } #ifdef FEATURE_SIMD if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet())) { assert(op1->TypeGet() == TYP_STRUCT); op1->gtType = lclTyp; } #endif // FEATURE_SIMD op1 = impImplicitIorI4Cast(op1, lclTyp); #ifdef TARGET_64BIT // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT)) { op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT); } #endif // TARGET_64BIT // We had better assign it a value of the correct type assertImp( genActualType(lclTyp) == genActualType(op1->gtType) || (genActualType(lclTyp) == TYP_I_IMPL && op1->IsLocalAddrExpr() != nullptr) || (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) || (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) || (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) || ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF)); /* If op1 is "&var" then its type is the transient "*" and it can be used either as TYP_BYREF or TYP_I_IMPL */ if (op1->IsLocalAddrExpr() != nullptr) { assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF); /* When "&var" is created, we assume it is a byref. If it is being assigned to a TYP_I_IMPL var, change the type to prevent unnecessary GC info */ if (genActualType(lclTyp) == TYP_I_IMPL) { op1->gtType = TYP_I_IMPL; } } // If this is a local and the local is a ref type, see // if we can improve type information based on the // value being assigned. if (isLocal && (lclTyp == TYP_REF)) { // We should have seen a stloc in our IL prescan. assert(lvaTable[lclNum].lvHasILStoreOp); // Is there just one place this local is defined? const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef; // Conservative check that there is just one // definition that reaches this store. 
const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0); if (isSingleDefLocal && hasSingleReachingDef) { lvaUpdateClass(lclNum, op1, clsHnd); } } /* Filter out simple assignments to itself */ if (op1->gtOper == GT_LCL_VAR && lclNum == op1->AsLclVarCommon()->GetLclNum()) { if (opts.compDbgCode) { op1 = gtNewNothingNode(); goto SPILL_APPEND; } else { break; } } /* Create the assignment node */ op2 = gtNewLclvNode(lclNum, lclTyp DEBUGARG(opcodeOffs + sz + 1)); /* If the local is aliased or pinned, we need to spill calls and indirections from the stack. */ if ((lvaTable[lclNum].IsAddressExposed() || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) && (verCurrentState.esStackDepth > 0)) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned")); } /* Spill any refs to the local from the stack */ impSpillLclRefs(lclNum); // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op2' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet()); } if (varTypeIsStruct(lclTyp)) { op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL); } else { op1 = gtNewAssignNode(op2, op1); } goto SPILL_APPEND; case CEE_LDLOCA: lclNum = getU2LittleEndian(codeAddr); goto LDLOCA; case CEE_LDLOCA_S: lclNum = getU1LittleEndian(codeAddr); LDLOCA: JITDUMP(" %u", lclNum); if (compIsForInlining()) { // Get the local type lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo; /* Have we allocated a temp for this local? */ lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp")); assert(!lvaGetDesc(lclNum)->lvNormalizeOnLoad()); op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum)); goto _PUSH_ADRVAR; } lclNum += numArgs; assertImp(lclNum < info.compLocalsCount); goto ADRVAR; case CEE_LDARGA: lclNum = getU2LittleEndian(codeAddr); goto LDARGA; case CEE_LDARGA_S: lclNum = getU1LittleEndian(codeAddr); LDARGA: JITDUMP(" %u", lclNum); Verify(lclNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument, // followed by a ldfld to load the field. op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo); if (op1->gtOper != GT_LCL_VAR) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR); return; } assert(op1->gtOper == GT_LCL_VAR); goto _PUSH_ADRVAR; } lclNum = compMapILargNum(lclNum); // account for possible hidden param assertImp(lclNum < numArgs); if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } goto ADRVAR; ADRVAR: op1 = impCreateLocalNode(lclNum DEBUGARG(opcodeOffs + sz + 1)); _PUSH_ADRVAR: assert(op1->gtOper == GT_LCL_VAR); /* Note that this is supposed to create the transient type "*" which may be used as a TYP_I_IMPL. However we catch places where it is used as a TYP_I_IMPL and change the node if needed. Thus we are pessimistic and may report byrefs in the GC info where it was not absolutely needed, but it is safer this way. 
*/ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does assert((op1->gtFlags & GTF_GLOB_REF) == 0); tiRetVal = lvaTable[lclNum].lvVerTypeInfo; impPushOnStack(op1, tiRetVal); break; case CEE_ARGLIST: if (!info.compIsVarArgs) { BADCODE("arglist in non-vararg method"); } assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG); /* The ARGLIST cookie is a hidden 'last' parameter, we have already adjusted the arg count because this is like fetching the last param */ assertImp(0 < numArgs); lclNum = lvaVarargsHandleArg; op1 = gtNewLclvNode(lclNum, TYP_I_IMPL DEBUGARG(opcodeOffs + sz + 1)); op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1); impPushOnStack(op1, tiRetVal); break; case CEE_ENDFINALLY: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY); return; } if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } if (info.compXcptnsCount == 0) { BADCODE("endfinally outside finally"); } assert(verCurrentState.esStackDepth == 0); op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr); goto APPEND; case CEE_ENDFILTER: if (compIsForInlining()) { assert(!"Shouldn't have exception handlers in the inliner!"); compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER); return; } block->bbSetRunRarely(); // filters are rare if (info.compXcptnsCount == 0) { BADCODE("endfilter outside filter"); } op1 = impPopStack().val; assertImp(op1->gtType == TYP_INT); if (!bbInFilterILRange(block)) { BADCODE("EndFilter outside a filter handler"); } /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET); /* Mark catch handler as successor */ op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1); if (verCurrentState.esStackDepth != 0) { verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__) DEBUGARG(__LINE__)); } goto APPEND; case CEE_RET: prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it RET: if (!impReturnInstruction(prefixFlags, opcode)) { return; // abort } else { break; } case CEE_JMP: assert(!compIsForInlining()); if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex()) { /* CEE_JMP does not make sense in some "protected" regions. */ BADCODE("Jmp not allowed in protected region"); } if (opts.IsReversePInvoke()) { BADCODE("Jmp not allowed in reverse P/Invoke"); } if (verCurrentState.esStackDepth != 0) { BADCODE("Stack must be empty after CEE_JMPs"); } _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); /* The signature of the target has to be identical to ours.
At least check that argCnt and returnType match */ eeGetMethodSig(resolvedToken.hMethod, &sig); if (sig.numArgs != info.compMethodInfo->args.numArgs || sig.retType != info.compMethodInfo->args.retType || sig.callConv != info.compMethodInfo->args.callConv) { BADCODE("Incompatible target for CEE_JMPs"); } op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod); /* Mark the basic block as being a JUMP instead of RETURN */ block->bbFlags |= BBF_HAS_JMP; /* Set this flag to make sure register arguments have a location assigned * even if we don't use them inside the method */ compJmpOpUsed = true; fgNoStructPromotion = true; goto APPEND; case CEE_LDELEMA: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a value class array we just do a simple address-of if (eeIsValueClass(ldelemClsHnd)) { CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd); if (cit == CORINFO_TYPE_UNDEF) { lclTyp = TYP_STRUCT; } else { lclTyp = JITtype2varType(cit); } goto ARR_LD_POST_VERIFY; } // Similarly, if its a readonly access, we can do a simple address-of // without doing a runtime type-check if (prefixFlags & PREFIX_READONLY) { lclTyp = TYP_REF; goto ARR_LD_POST_VERIFY; } // Otherwise we need the full helper function with run-time type check op1 = impTokenToHandle(&resolvedToken); if (op1 == nullptr) { // compDonotInline() return; } { GenTreeCall::Use* args = gtNewCallArgs(op1); // Type args = gtPrependNewCallArg(impPopStack().val, args); // index args = gtPrependNewCallArg(impPopStack().val, args); // array op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args); } impPushOnStack(op1, tiRetVal); break; // ldelem for reference and value types case CEE_LDELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); ldelemClsHnd = resolvedToken.hClass; // If it's a reference type or generic variable type // then just generate code as though it's a ldelem.ref instruction if (!eeIsValueClass(ldelemClsHnd)) { lclTyp = TYP_REF; opcode = CEE_LDELEM_REF; } else { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd); lclTyp = JITtype2varType(jitTyp); tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct tiRetVal.NormaliseForStack(); } goto ARR_LD_POST_VERIFY; case CEE_LDELEM_I1: lclTyp = TYP_BYTE; goto ARR_LD; case CEE_LDELEM_I2: lclTyp = TYP_SHORT; goto ARR_LD; case CEE_LDELEM_I: lclTyp = TYP_I_IMPL; goto ARR_LD; // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter // and treating it as TYP_INT avoids other asserts. 
case CEE_LDELEM_U4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I4: lclTyp = TYP_INT; goto ARR_LD; case CEE_LDELEM_I8: lclTyp = TYP_LONG; goto ARR_LD; case CEE_LDELEM_REF: lclTyp = TYP_REF; goto ARR_LD; case CEE_LDELEM_R4: lclTyp = TYP_FLOAT; goto ARR_LD; case CEE_LDELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_LD; case CEE_LDELEM_U1: lclTyp = TYP_UBYTE; goto ARR_LD; case CEE_LDELEM_U2: lclTyp = TYP_USHORT; goto ARR_LD; ARR_LD: ARR_LD_POST_VERIFY: /* Pull the index value and array address */ op2 = impPopStack().val; op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); /* Check for null pointer - in the inliner case we simply abort */ if (compIsForInlining()) { if (op1->gtOper == GT_CNS_INT) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM); return; } } /* Mark the block as containing an index expression */ if (op1->gtOper == GT_LCL_VAR) { if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node and push it on the stack */ op1 = gtNewIndexRef(lclTyp, op1, op2); ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT); if ((opcode == CEE_LDELEMA) || ldstruct || (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd))) { assert(ldelemClsHnd != DUMMY_INIT(NULL)); // remember the element size if (lclTyp == TYP_REF) { op1->AsIndex()->gtIndElemSize = TARGET_POINTER_SIZE; } else { // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type. if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF) { op1->AsIndex()->gtStructElemClass = ldelemClsHnd; } assert(lclTyp != TYP_STRUCT || op1->AsIndex()->gtStructElemClass != nullptr); if (lclTyp == TYP_STRUCT) { size = info.compCompHnd->getClassSize(ldelemClsHnd); op1->AsIndex()->gtIndElemSize = size; op1->gtType = lclTyp; } } if ((opcode == CEE_LDELEMA) || ldstruct) { // wrap it in a & lclTyp = TYP_BYREF; op1 = gtNewOperNode(GT_ADDR, lclTyp, op1); } else { assert(lclTyp != TYP_STRUCT); } } if (ldstruct) { // Create an OBJ for the result op1 = gtNewObjNode(ldelemClsHnd, op1); op1->gtFlags |= GTF_EXCEPT; } impPushOnStack(op1, tiRetVal); break; // stelem for reference and value types case CEE_STELEM: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); stelemClsHnd = resolvedToken.hClass; // If it's a reference type just behave as though it's a stelem.ref instruction if (!eeIsValueClass(stelemClsHnd)) { goto STELEM_REF_POST_VERIFY; } // Otherwise extract the type { CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd); lclTyp = JITtype2varType(jitTyp); goto ARR_ST_POST_VERIFY; } case CEE_STELEM_REF: STELEM_REF_POST_VERIFY: if (opts.OptimizationEnabled()) { GenTree* array = impStackTop(2).val; GenTree* value = impStackTop().val; // Is this a case where we can skip the covariant store check? 
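// (Skipping is typically possible in cases such as storing a null reference, storing a value just loaded from the same array, or when the array's exact element type is known to be compatible with the value's type.)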
if (impCanSkipCovariantStoreCheck(value, array)) { lclTyp = TYP_REF; goto ARR_ST_POST_VERIFY; } } // Else call a helper function to do the assignment op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopCallArgs(3, nullptr)); goto SPILL_APPEND; case CEE_STELEM_I1: lclTyp = TYP_BYTE; goto ARR_ST; case CEE_STELEM_I2: lclTyp = TYP_SHORT; goto ARR_ST; case CEE_STELEM_I: lclTyp = TYP_I_IMPL; goto ARR_ST; case CEE_STELEM_I4: lclTyp = TYP_INT; goto ARR_ST; case CEE_STELEM_I8: lclTyp = TYP_LONG; goto ARR_ST; case CEE_STELEM_R4: lclTyp = TYP_FLOAT; goto ARR_ST; case CEE_STELEM_R8: lclTyp = TYP_DOUBLE; goto ARR_ST; ARR_ST: ARR_ST_POST_VERIFY: /* The strict order of evaluation is LHS-operands, RHS-operands, range-check, and then assignment. However, codegen currently does the range-check before evaluation the RHS-operands. So to maintain strict ordering, we spill the stack. */ if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Strict ordering of exceptions for Array store")); } /* Pull the new value from the stack */ op2 = impPopStack().val; /* Pull the index value */ op1 = impPopStack().val; /* Pull the array address */ op3 = impPopStack().val; assertImp(op3->gtType == TYP_REF); if (op2->IsLocalAddrExpr() != nullptr) { op2->gtType = TYP_I_IMPL; } // Mark the block as containing an index expression if (op3->gtOper == GT_LCL_VAR) { if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD) { block->bbFlags |= BBF_HAS_IDX_LEN; optMethodFlags |= OMF_HAS_ARRAYREF; } } /* Create the index node */ op1 = gtNewIndexRef(lclTyp, op3, op1); /* Create the assignment node and append it */ if (lclTyp == TYP_STRUCT) { assert(stelemClsHnd != DUMMY_INIT(NULL)); op1->AsIndex()->gtStructElemClass = stelemClsHnd; op1->AsIndex()->gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd); } if (varTypeIsStruct(op1)) { op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL); } else { op2 = impImplicitR4orR8Cast(op2, op1->TypeGet()); op1 = gtNewAssignNode(op1, op2); } /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; goto SPILL_APPEND; case CEE_ADD: oper = GT_ADD; goto MATH_OP2; case CEE_ADD_OVF: uns = false; goto ADD_OVF; case CEE_ADD_OVF_UN: uns = true; goto ADD_OVF; ADD_OVF: ovfl = true; callNode = false; oper = GT_ADD; goto MATH_OP2_FLAGS; case CEE_SUB: oper = GT_SUB; goto MATH_OP2; case CEE_SUB_OVF: uns = false; goto SUB_OVF; case CEE_SUB_OVF_UN: uns = true; goto SUB_OVF; SUB_OVF: ovfl = true; callNode = false; oper = GT_SUB; goto MATH_OP2_FLAGS; case CEE_MUL: oper = GT_MUL; goto MATH_MAYBE_CALL_NO_OVF; case CEE_MUL_OVF: uns = false; goto MUL_OVF; case CEE_MUL_OVF_UN: uns = true; goto MUL_OVF; MUL_OVF: ovfl = true; oper = GT_MUL; goto MATH_MAYBE_CALL_OVF; // Other binary math operations case CEE_DIV: oper = GT_DIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_DIV_UN: oper = GT_UDIV; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM: oper = GT_MOD; goto MATH_MAYBE_CALL_NO_OVF; case CEE_REM_UN: oper = GT_UMOD; goto MATH_MAYBE_CALL_NO_OVF; MATH_MAYBE_CALL_NO_OVF: ovfl = false; MATH_MAYBE_CALL_OVF: // Morpher has some complex logic about when to turn different // typed nodes on different platforms into helper calls. We // need to either duplicate that logic here, or just // pessimistically make all the nodes large enough to become // call nodes. Since call nodes aren't that much larger and // these opcodes are infrequent enough I chose the latter. 
callNode = true; goto MATH_OP2_FLAGS; case CEE_AND: oper = GT_AND; goto MATH_OP2; case CEE_OR: oper = GT_OR; goto MATH_OP2; case CEE_XOR: oper = GT_XOR; goto MATH_OP2; MATH_OP2: // For default values of 'ovfl' and 'callNode' ovfl = false; callNode = false; MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set /* Pull two values and push back the result */ op2 = impPopStack().val; op1 = impPopStack().val; /* Can't do arithmetic with references */ assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF); // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if its a true byref, only // if it is in the stack) impBashVarAddrsToI(op1, op2); type = impGetByRefResultType(oper, uns, &op1, &op2); assert(!ovfl || !varTypeIsFloating(op1->gtType)); /* Special case: "int+0", "int-0", "int*1", "int/1" */ if (op2->gtOper == GT_CNS_INT) { if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) || (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV))) { impPushOnStack(op1, tiRetVal); break; } } // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand // if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { if (op1->TypeGet() != type) { // We insert a cast of op1 to 'type' op1 = gtNewCastNode(type, op1, false, type); } if (op2->TypeGet() != type) { // We insert a cast of op2 to 'type' op2 = gtNewCastNode(type, op2, false, type); } } if (callNode) { /* These operators can later be transformed into 'GT_CALL' */ assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]); #ifndef TARGET_ARM assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]); assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]); #endif // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying // that we'll need to transform into a general large node, but rather specifically // to a call: by doing it this way, things keep working if there are multiple sizes, // and a CALL is no longer the largest. // That said, as of now it *is* a large node, so we'll do this with an assert rather // than an "if". 
assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE); op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true)); } else { op1 = gtNewOperNode(oper, type, op1, op2); } /* Special case: integer/long division may throw an exception */ if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this)) { op1->gtFlags |= GTF_EXCEPT; } if (ovfl) { assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL); if (ovflType != TYP_UNKNOWN) { op1->gtType = ovflType; } op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } } impPushOnStack(op1, tiRetVal); break; case CEE_SHL: oper = GT_LSH; goto CEE_SH_OP2; case CEE_SHR: oper = GT_RSH; goto CEE_SH_OP2; case CEE_SHR_UN: oper = GT_RSZ; goto CEE_SH_OP2; CEE_SH_OP2: op2 = impPopStack().val; op1 = impPopStack().val; // operand to be shifted impBashVarAddrsToI(op1, op2); type = genActualType(op1->TypeGet()); op1 = gtNewOperNode(oper, type, op1, op2); impPushOnStack(op1, tiRetVal); break; case CEE_NOT: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); type = genActualType(op1->TypeGet()); impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal); break; case CEE_CKFINITE: op1 = impPopStack().val; type = op1->TypeGet(); op1 = gtNewOperNode(GT_CKFINITE, type, op1); op1->gtFlags |= GTF_EXCEPT; impPushOnStack(op1, tiRetVal); break; case CEE_LEAVE: val = getI4LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val); goto LEAVE; case CEE_LEAVE_S: val = getI1LittleEndian(codeAddr); // jump distance jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val); LEAVE: if (compIsForInlining()) { compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE); return; } JITDUMP(" %04X", jmpAddr); if (block->bbJumpKind != BBJ_LEAVE) { impResetLeaveBlock(block, jmpAddr); } assert(jmpAddr == block->bbJumpDest->bbCodeOffs); impImportLeave(block); impNoteBranchOffs(); break; case CEE_BR: case CEE_BR_S: jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr); if (compIsForInlining() && jmpDist == 0) { break; /* NOP */ } impNoteBranchOffs(); break; case CEE_BRTRUE: case CEE_BRTRUE_S: case CEE_BRFALSE: case CEE_BRFALSE_S: /* Pop the comparand (now there's a neat term) from the stack */ op1 = impPopStack().val; type = op1->TypeGet(); // Per Ecma-355, brfalse and brtrue are only specified for nint, ref, and byref. // // We've historically been a bit more permissive, so here we allow // any type that gtNewZeroConNode can handle. if (!varTypeIsArithmetic(type) && !varTypeIsGC(type)) { BADCODE("invalid type for brtrue/brfalse"); } if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { op1 = gtUnusedValNode(op1); goto SPILL_APPEND; } else { break; } } if (op1->OperIsCompare()) { if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S) { // Flip the sense of the compare op1 = gtReverseCond(op1); } } else { // We'll compare against an equally-sized integer 0 // For small types, we always compare against int op2 = gtNewZeroConNode(genActualType(op1->gtType)); // Create the comparison operator and try to fold it oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? 
GT_NE : GT_EQ; op1 = gtNewOperNode(oper, TYP_INT, op1, op2); } // fall through COND_JUMP: /* Fold comparison if we can */ op1 = gtFoldExpr(op1); /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/ /* Don't make any blocks unreachable in import only mode */ if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly()) { /* gtFoldExpr() should prevent this as we don't want to make any blocks unreachable under compDbgCode */ assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); assertImp((block->bbJumpKind == BBJ_COND) // normal case || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the // block for the second time block->bbJumpKind = foldedJumpKind; #ifdef DEBUG if (verbose) { if (op1->AsIntCon()->gtIconVal) { printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n", block->bbJumpDest->bbNum); } else { printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum); } } #endif break; } op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1); /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt' in impImportBlock(block). For correct line numbers, spill stack. */ if (opts.compDbgCode && impCurStmtDI.IsValid()) { impSpillStackEnsure(true); } goto SPILL_APPEND; case CEE_CEQ: oper = GT_EQ; uns = false; goto CMP_2_OPs; case CEE_CGT_UN: oper = GT_GT; uns = true; goto CMP_2_OPs; case CEE_CGT: oper = GT_GT; uns = false; goto CMP_2_OPs; case CEE_CLT_UN: oper = GT_LT; uns = true; goto CMP_2_OPs; case CEE_CLT: oper = GT_LT; uns = false; goto CMP_2_OPs; CMP_2_OPs: op2 = impPopStack().val; op1 = impPopStack().val; // Recognize the IL idiom of CGT_UN(op1, 0) and normalize // it so that downstream optimizations don't have to. if ((opcode == CEE_CGT_UN) && op2->IsIntegralConst(0)) { oper = GT_NE; uns = false; } #ifdef TARGET_64BIT // TODO-Casts: create a helper that upcasts int32 -> native int when necessary. // See also identical code in impGetByRefResultType and STSFLD import. if (varTypeIsI(op1) && (genActualType(op2) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, TYP_I_IMPL); } else if (varTypeIsI(op2) && (genActualType(op1) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1) == genActualType(op2) || (varTypeIsI(op1) && varTypeIsI(op2)) || (varTypeIsFloating(op1) && varTypeIsFloating(op2))); // Create the comparison node. op1 = gtNewOperNode(oper, TYP_INT, op1, op2); // TODO: setting both flags when only one is appropriate. if (uns) { op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED; } // Fold result, if possible. 
op1 = gtFoldExpr(op1); impPushOnStack(op1, tiRetVal); break; case CEE_BEQ_S: case CEE_BEQ: oper = GT_EQ; goto CMP_2_OPs_AND_BR; case CEE_BGE_S: case CEE_BGE: oper = GT_GE; goto CMP_2_OPs_AND_BR; case CEE_BGE_UN_S: case CEE_BGE_UN: oper = GT_GE; goto CMP_2_OPs_AND_BR_UN; case CEE_BGT_S: case CEE_BGT: oper = GT_GT; goto CMP_2_OPs_AND_BR; case CEE_BGT_UN_S: case CEE_BGT_UN: oper = GT_GT; goto CMP_2_OPs_AND_BR_UN; case CEE_BLE_S: case CEE_BLE: oper = GT_LE; goto CMP_2_OPs_AND_BR; case CEE_BLE_UN_S: case CEE_BLE_UN: oper = GT_LE; goto CMP_2_OPs_AND_BR_UN; case CEE_BLT_S: case CEE_BLT: oper = GT_LT; goto CMP_2_OPs_AND_BR; case CEE_BLT_UN_S: case CEE_BLT_UN: oper = GT_LT; goto CMP_2_OPs_AND_BR_UN; case CEE_BNE_UN_S: case CEE_BNE_UN: oper = GT_NE; goto CMP_2_OPs_AND_BR_UN; CMP_2_OPs_AND_BR_UN: uns = true; unordered = true; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR: uns = false; unordered = false; goto CMP_2_OPs_AND_BR_ALL; CMP_2_OPs_AND_BR_ALL: /* Pull two values */ op2 = impPopStack().val; op1 = impPopStack().val; #ifdef TARGET_64BIT if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT)) { op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL); } #endif // TARGET_64BIT assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) || (varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet())) || (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))); if (opts.OptimizationEnabled() && (block->bbJumpDest == block->bbNext)) { block->bbJumpKind = BBJ_NONE; if (op1->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op1 side effect")); impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } if (op2->gtFlags & GTF_GLOB_EFFECT) { impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG( "Branch to next Optimization, op2 side effect")); impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } #ifdef DEBUG if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT) { impNoteLastILoffs(); } #endif break; } // We can generate an compare of different sized floating point op1 and op2 // We insert a cast // if (varTypeIsFloating(op1->TypeGet())) { if (op1->TypeGet() != op2->TypeGet()) { assert(varTypeIsFloating(op2->TypeGet())); // say op1=double, op2=float. To avoid loss of precision // while comparing, op2 is converted to double and double // comparison is done. 
if (op1->TypeGet() == TYP_DOUBLE) { // We insert a cast of op2 to TYP_DOUBLE op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE); } else if (op2->TypeGet() == TYP_DOUBLE) { // We insert a cast of op1 to TYP_DOUBLE op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE); } } } /* Create and append the operator */ op1 = gtNewOperNode(oper, TYP_INT, op1, op2); if (uns) { op1->gtFlags |= GTF_UNSIGNED; } if (unordered) { op1->gtFlags |= GTF_RELOP_NAN_UN; } goto COND_JUMP; case CEE_SWITCH: /* Pop the switch value off the stack */ op1 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op1->TypeGet())); /* We can create a switch node */ op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1); val = (int)getU4LittleEndian(codeAddr); codeAddr += 4 + val * 4; // skip over the switch-table goto SPILL_APPEND; /************************** Casting OPCODES ***************************/ case CEE_CONV_OVF_I1: lclTyp = TYP_BYTE; goto CONV_OVF; case CEE_CONV_OVF_I2: lclTyp = TYP_SHORT; goto CONV_OVF; case CEE_CONV_OVF_I: lclTyp = TYP_I_IMPL; goto CONV_OVF; case CEE_CONV_OVF_I4: lclTyp = TYP_INT; goto CONV_OVF; case CEE_CONV_OVF_I8: lclTyp = TYP_LONG; goto CONV_OVF; case CEE_CONV_OVF_U1: lclTyp = TYP_UBYTE; goto CONV_OVF; case CEE_CONV_OVF_U2: lclTyp = TYP_USHORT; goto CONV_OVF; case CEE_CONV_OVF_U: lclTyp = TYP_U_IMPL; goto CONV_OVF; case CEE_CONV_OVF_U4: lclTyp = TYP_UINT; goto CONV_OVF; case CEE_CONV_OVF_U8: lclTyp = TYP_ULONG; goto CONV_OVF; case CEE_CONV_OVF_I1_UN: lclTyp = TYP_BYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_I2_UN: lclTyp = TYP_SHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_I_UN: lclTyp = TYP_I_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_I4_UN: lclTyp = TYP_INT; goto CONV_OVF_UN; case CEE_CONV_OVF_I8_UN: lclTyp = TYP_LONG; goto CONV_OVF_UN; case CEE_CONV_OVF_U1_UN: lclTyp = TYP_UBYTE; goto CONV_OVF_UN; case CEE_CONV_OVF_U2_UN: lclTyp = TYP_USHORT; goto CONV_OVF_UN; case CEE_CONV_OVF_U_UN: lclTyp = TYP_U_IMPL; goto CONV_OVF_UN; case CEE_CONV_OVF_U4_UN: lclTyp = TYP_UINT; goto CONV_OVF_UN; case CEE_CONV_OVF_U8_UN: lclTyp = TYP_ULONG; goto CONV_OVF_UN; CONV_OVF_UN: uns = true; goto CONV_OVF_COMMON; CONV_OVF: uns = false; goto CONV_OVF_COMMON; CONV_OVF_COMMON: ovfl = true; goto _CONV; case CEE_CONV_I1: lclTyp = TYP_BYTE; goto CONV; case CEE_CONV_I2: lclTyp = TYP_SHORT; goto CONV; case CEE_CONV_I: lclTyp = TYP_I_IMPL; goto CONV; case CEE_CONV_I4: lclTyp = TYP_INT; goto CONV; case CEE_CONV_I8: lclTyp = TYP_LONG; goto CONV; case CEE_CONV_U1: lclTyp = TYP_UBYTE; goto CONV; case CEE_CONV_U2: lclTyp = TYP_USHORT; goto CONV; #if (REGSIZE_BYTES == 8) case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV_UN; #else case CEE_CONV_U: lclTyp = TYP_U_IMPL; goto CONV; #endif case CEE_CONV_U4: lclTyp = TYP_UINT; goto CONV; case CEE_CONV_U8: lclTyp = TYP_ULONG; goto CONV_UN; case CEE_CONV_R4: lclTyp = TYP_FLOAT; goto CONV; case CEE_CONV_R8: lclTyp = TYP_DOUBLE; goto CONV; case CEE_CONV_R_UN: lclTyp = TYP_DOUBLE; goto CONV_UN; CONV_UN: uns = true; ovfl = false; goto _CONV; CONV: uns = false; ovfl = false; goto _CONV; _CONV: // only converts from FLOAT or DOUBLE to an integer type // and converts from ULONG (or LONG on ARM) to DOUBLE are morphed to calls if (varTypeIsFloating(lclTyp)) { callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl #ifdef TARGET_64BIT // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? // TYP_BYREF could be used as TYP_I_IMPL which is long. 
// TODO-CQ: remove this when we lower casts long/ulong --> float/double // and generate SSE2 code instead of going through helper calls. || (impStackTop().val->TypeGet() == TYP_BYREF) #endif ; } else { callNode = varTypeIsFloating(impStackTop().val->TypeGet()); } op1 = impPopStack().val; impBashVarAddrsToI(op1); // Casts from floating point types must not have GTF_UNSIGNED set. if (varTypeIsFloating(op1)) { uns = false; } // At this point uns, ovf, callNode are all set. if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND) { op2 = op1->AsOp()->gtOp2; if (op2->gtOper == GT_CNS_INT) { ssize_t ival = op2->AsIntCon()->gtIconVal; ssize_t mask, umask; switch (lclTyp) { case TYP_BYTE: case TYP_UBYTE: mask = 0x00FF; umask = 0x007F; break; case TYP_USHORT: case TYP_SHORT: mask = 0xFFFF; umask = 0x7FFF; break; default: assert(!"unexpected type"); return; } if (((ival & umask) == ival) || ((ival & mask) == ival && uns)) { /* Toss the cast, it's a waste of time */ impPushOnStack(op1, tiRetVal); break; } else if (ival == mask) { /* Toss the masking, it's a waste of time, since we sign-extend from the small value anyways */ op1 = op1->AsOp()->gtOp1; } } } /* The 'op2' sub-operand of a cast is the 'real' type number, since the result of a cast to one of the 'small' integer types is an integer. */ type = genActualType(lclTyp); // If this is a no-op cast, just use op1. if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp))) { // Nothing needs to change } // Work is evidently required, add cast node else { if (callNode) { op1 = gtNewCastNodeL(type, op1, uns, lclTyp); } else { op1 = gtNewCastNode(type, op1, uns, lclTyp); } if (ovfl) { op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT); } if (op1->gtGetOp1()->OperIsConst() && opts.OptimizationEnabled()) { // Try and fold the introduced cast op1 = gtFoldExprConst(op1); } } impPushOnStack(op1, tiRetVal); break; case CEE_NEG: op1 = impPopStack().val; impBashVarAddrsToI(op1, nullptr); impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal); break; case CEE_POP: { /* Pull the top value from the stack */ StackEntry se = impPopStack(); clsHnd = se.seTypeInfo.GetClassHandle(); op1 = se.val; /* Get hold of the type of the value being duplicated */ lclTyp = genActualType(op1->gtType); /* Does the value have any side effects? */ if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode) { // Since we are throwing away the value, just normalize // it to its address. This is more efficient. if (varTypeIsStruct(op1)) { JITDUMP("\n ... CEE_POP struct ...\n"); DISPTREE(op1); #ifdef UNIX_AMD64_ABI // Non-calls, such as obj or ret_expr, have to go through this. // Calls with large struct return value have to go through this. // Helper calls with small struct return value also have to go // through this since they do not follow Unix calling convention. if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd, op1->AsCall()->GetUnmanagedCallConv()) || op1->AsCall()->gtCallType == CT_HELPER) #endif // UNIX_AMD64_ABI { // If the value being produced comes from loading // via an underlying address, just null check the address. if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ)) { gtChangeOperToNullCheck(op1, block); } else { op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false); } JITDUMP("\n ... optimized to ...\n"); DISPTREE(op1); } } // If op1 is non-overflow cast, throw it away since it is useless. 
// Another reason for throwing away the useless cast is in the context of // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)). // The cast gets added as part of importing GT_CALL, which gets in the way // of fgMorphCall() on the forms of tail call nodes that we assert. if ((op1->gtOper == GT_CAST) && !op1->gtOverflow()) { op1 = op1->AsOp()->gtOp1; } if (op1->gtOper != GT_CALL) { if ((op1->gtFlags & GTF_SIDE_EFFECT) != 0) { op1 = gtUnusedValNode(op1); } else { // Can't bash to NOP here because op1 can be referenced from `currentBlock->bbEntryState`, // if we ever need to reimport we need a valid LCL_VAR on it. op1 = gtNewNothingNode(); } } /* Append the value to the tree list */ goto SPILL_APPEND; } /* No side effects - just throw the <BEEP> thing away */ } break; case CEE_DUP: { StackEntry se = impPopStack(); GenTree* tree = se.val; tiRetVal = se.seTypeInfo; op1 = tree; // If the expression to dup is simple, just clone it. // Otherwise spill it to a temp, and reload the temp twice. bool cloneExpr = false; if (!opts.compDbgCode) { // Duplicate 0 and +0.0 if (op1->IsIntegralConst(0) || op1->IsFloatPositiveZero()) { cloneExpr = true; } // Duplicate locals and addresses of them else if (op1->IsLocal()) { cloneExpr = true; } else if (op1->TypeIs(TYP_BYREF) && op1->OperIs(GT_ADDR) && op1->gtGetOp1()->IsLocal() && (OPCODE)impGetNonPrefixOpcode(codeAddr + sz, codeEndp) != CEE_INITOBJ) { cloneExpr = true; } } else { // Always clone for debug mode cloneExpr = true; } if (!cloneExpr) { const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill")); impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types type = genActualType(lvaTable[tmpNum].TypeGet()); op1 = gtNewLclvNode(tmpNum, type); // Propagate type info to the temp from the stack and the original tree if (type == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", tmpNum); lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle()); } } op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("DUP instruction")); assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT)); impPushOnStack(op1, tiRetVal); impPushOnStack(op2, tiRetVal); } break; case CEE_STIND_I1: lclTyp = TYP_BYTE; goto STIND; case CEE_STIND_I2: lclTyp = TYP_SHORT; goto STIND; case CEE_STIND_I4: lclTyp = TYP_INT; goto STIND; case CEE_STIND_I8: lclTyp = TYP_LONG; goto STIND; case CEE_STIND_I: lclTyp = TYP_I_IMPL; goto STIND; case CEE_STIND_REF: lclTyp = TYP_REF; goto STIND; case CEE_STIND_R4: lclTyp = TYP_FLOAT; goto STIND; case CEE_STIND_R8: lclTyp = TYP_DOUBLE; goto STIND; STIND: op2 = impPopStack().val; // value to store op1 = impPopStack().val; // address to store to // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); impBashVarAddrsToI(op1, op2); op2 = impImplicitR4orR8Cast(op2, lclTyp); #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if 
(varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // TARGET_64BIT if (opcode == CEE_STIND_REF) { // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType)); lclTyp = genActualType(op2->TypeGet()); } // Check target type. #ifdef DEBUG if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF) { if (op2->gtType == TYP_BYREF) { assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL); } else if (lclTyp == TYP_BYREF) { assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType)); } } else { assertImp(genActualType(op2->gtType) == genActualType(lclTyp) || ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp))); } #endif op1 = gtNewOperNode(GT_IND, lclTyp, op1); // stind could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE; if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } op1 = gtNewAssignNode(op1, op2); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; // Spill side-effects AND global-data-accesses if (verCurrentState.esStackDepth > 0) { impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND")); } goto APPEND; case CEE_LDIND_I1: lclTyp = TYP_BYTE; goto LDIND; case CEE_LDIND_I2: lclTyp = TYP_SHORT; goto LDIND; case CEE_LDIND_U4: case CEE_LDIND_I4: lclTyp = TYP_INT; goto LDIND; case CEE_LDIND_I8: lclTyp = TYP_LONG; goto LDIND; case CEE_LDIND_REF: lclTyp = TYP_REF; goto LDIND; case CEE_LDIND_I: lclTyp = TYP_I_IMPL; goto LDIND; case CEE_LDIND_R4: lclTyp = TYP_FLOAT; goto LDIND; case CEE_LDIND_R8: lclTyp = TYP_DOUBLE; goto LDIND; case CEE_LDIND_U1: lclTyp = TYP_UBYTE; goto LDIND; case CEE_LDIND_U2: lclTyp = TYP_USHORT; goto LDIND; LDIND: op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); #ifdef TARGET_64BIT // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (genActualType(op1->gtType) == TYP_INT) { op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL); } #endif assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, lclTyp, op1); // ldind could point anywhere, example a boxed class static int op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE); if (prefixFlags & PREFIX_VOLATILE) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert(op1->OperGet() == GT_IND); op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; case CEE_UNALIGNED: assert(sz == 1); val = getU1LittleEndian(codeAddr); ++codeAddr; JITDUMP(" %u", val); if ((val != 1) && (val != 2) && (val != 4)) { BADCODE("Alignment unaligned. must be 1, 2, or 4"); } Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. 
prefixes"); prefixFlags |= PREFIX_UNALIGNED; impValidateMemoryAccessOpcode(codeAddr, codeEndp, false); PREFIX: opcode = (OPCODE)getU1LittleEndian(codeAddr); opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode); codeAddr += sizeof(__int8); goto DECODE_OPCODE; case CEE_VOLATILE: Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes"); prefixFlags |= PREFIX_VOLATILE; impValidateMemoryAccessOpcode(codeAddr, codeEndp, true); assert(sz == 0); goto PREFIX; case CEE_LDFTN: { // Need to do a lookup here so that we perform an access check // and do a NOWAY if protections are violated _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); DO_LDFTN: op1 = impMethodPointer(&resolvedToken, &callInfo); if (compDonotInline()) { return; } // Call info may have more precise information about the function than // the resolved token. CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(callInfo.hMethod != nullptr); heapToken->hMethod = callInfo.hMethod; impPushOnStack(op1, typeInfo(heapToken)); break; } case CEE_LDVIRTFTN: { /* Get the method token */ _impResolveToken(CORINFO_TOKENKIND_Method); JITDUMP(" %08X", resolvedToken.token); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */, combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN), CORINFO_CALLINFO_CALLVIRT), &callInfo); // This check really only applies to intrinsic Array.Address methods if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE) { NO_WAY("Currently do not support LDFTN of Parameterized functions"); } mflags = callInfo.methodFlags; impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); if (compIsForInlining()) { if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL); return; } } CORINFO_SIG_INFO& ftnSig = callInfo.sig; /* Get the object-ref */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); if (opts.IsReadyToRun()) { if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } } else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL)) { if (op1->gtFlags & GTF_SIDE_EFFECT) { op1 = gtUnusedValNode(op1); impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } goto DO_LDFTN; } GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo); if (compDonotInline()) { return; } CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken); assert(heapToken->tokenType == CORINFO_TOKENKIND_Method); assert(callInfo.hMethod != nullptr); heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn; heapToken->hMethod = callInfo.hMethod; impPushOnStack(fptr, typeInfo(heapToken)); break; } case CEE_CONSTRAINED: assertImp(sz == sizeof(unsigned)); impResolveToken(codeAddr, &constrainedResolvedToken, 
CORINFO_TOKENKIND_Constrained); codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually JITDUMP(" (%08X) ", constrainedResolvedToken.token); Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes"); prefixFlags |= PREFIX_CONSTRAINED; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_CALLVIRT && actualOpcode != CEE_CALL && actualOpcode != CEE_LDFTN) { BADCODE("constrained. has to be followed by callvirt, call or ldftn"); } } goto PREFIX; case CEE_READONLY: JITDUMP(" readonly."); Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes"); prefixFlags |= PREFIX_READONLY; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("readonly. has to be followed by ldelema or call"); } } assert(sz == 0); goto PREFIX; case CEE_TAILCALL: JITDUMP(" tail."); Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; { OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp); if (!impOpcodeIsCallOpcode(actualOpcode)) { BADCODE("tailcall. has to be followed by call, callvirt or calli"); } } assert(sz == 0); goto PREFIX; case CEE_NEWOBJ: /* Since we will implicitly insert newObjThisPtr at the start of the argument list, spill any GTF_ORDER_SIDEEFF */ impSpillSpecialSideEff(); /* NEWOBJ does not respond to TAIL */ prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT; /* NEWOBJ does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; _impResolveToken(CORINFO_TOKENKIND_NewObj); eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/, combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM), &callInfo); mflags = callInfo.methodFlags; if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0) { BADCODE("newobj on static or abstract method"); } // Insert the security callout before any actual code is generated impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); // There are three different cases for new // Object size is variable (depends on arguments) // 1) Object is an array (arrays treated specially by the EE) // 2) Object is some other variable sized object (e.g. String) // 3) Class Size can be determined beforehand (normal case) // In the first case, we need to call a NEWOBJ helper (multinewarray) // in the second case we call the constructor with a '0' this pointer // In the third case we alloc the memory, then call the constuctor clsFlags = callInfo.classFlags; if (clsFlags & CORINFO_FLG_ARRAY) { // Arrays need to call the NEWOBJ helper. assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE); impImportNewObjArray(&resolvedToken, &callInfo); if (compDonotInline()) { return; } callTyp = TYP_REF; break; } // At present this can only be String else if (clsFlags & CORINFO_FLG_VAROBJSIZE) { // Skip this thisPtr argument newObjThisPtr = nullptr; /* Remember that this basic block contains 'new' of an object */ block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; } else { // This is the normal case where the size of the object is // fixed. Allocate the memory and call the constructor. 
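// Two sub-cases follow: for value classes the new object lives in a temp local and the constructor is called on its address; for reference types a GT_ALLOCOBJ node is created, assigned to a temp, and that temp becomes the 'this' pointer for the constructor call.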
// Note: We cannot add a peep to avoid use of temp here // because we don't have enough interference info to detect when // sources and destination interfere, example: s = new S(ref); // TODO: Find the correct place to introduce a general // reverse copy prop for struct return values from newobj or // any function returning structs. /* get a temporary for the new object */ lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp")); if (compDonotInline()) { // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS. assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS); return; } // In the value class case we only need clsHnd for size calcs. // // The lookup of the code pointer will be handled by CALL in this case if (clsFlags & CORINFO_FLG_VALUECLASS) { if (compIsForInlining()) { // If value class has GC fields, inform the inliner. It may choose to // bail out on the inline. DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; // some policies do not track the relative hotness of call sites for // "always" inline cases. if (impInlineInfo->iciBlock->isRunRarely()) { compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (compInlineResult->IsFailure()) { return; } } } } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { lvaTable[lclNum].lvType = JITtype2varType(jitTyp); } else { // The local variable itself is the allocated space. // Here we need unsafe value cls check, since the address of struct is taken for further use // and potentially exploitable. lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */); } bool bbInALoop = impBlockIsInALoop(block); bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) && (!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN)); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { // Append a tree to zero-out the temp newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet()); newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest gtNewIconNode(0), // Value false, // isVolatile false); // not copyBlock impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); } else { JITDUMP("\nSuppressing zero-init for V%02u -- expect to zero in prolog\n", lclNum); lclDsc->lvSuppressedZeroInit = 1; compSuppressedZeroInit = true; } // Obtain the address of the temp newObjThisPtr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet())); } else { // If we're newing up a finalizable object, spill anything that can cause exceptions. // bool hasSideEffects = false; CorInfoHelpFunc newHelper = info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd, &hasSideEffects); if (hasSideEffects) { JITDUMP("\nSpilling stack for finalizable newobj\n"); impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("finalizable newobj spill")); } const bool useParent = true; op1 = gtNewAllocObjNode(&resolvedToken, useParent); if (op1 == nullptr) { return; } // Remember that this basic block contains 'new' of an object block->bbFlags |= BBF_HAS_NEWOBJ; optMethodFlags |= OMF_HAS_NEWOBJ; // Append the assignment to the temp/local.
Dont need to spill // at all as we are just calling an EE-Jit helper which can only // cause an (async) OutOfMemoryException. // We assign the newly allocated object (by a GT_ALLOCOBJ node) // to a temp. Note that the pattern "temp = allocObj" is required // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes // without exhaustive walk over all expressions. impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE); assert(lvaTable[lclNum].lvSingleDef == 0); lvaTable[lclNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def local\n", lclNum); lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */); newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF); } } goto CALL; case CEE_CALLI: /* CALLI does not respond to CONSTRAINED */ prefixFlags &= ~PREFIX_CONSTRAINED; FALLTHROUGH; case CEE_CALLVIRT: case CEE_CALL: // We can't call getCallInfo on the token from a CALLI, but we need it in // many other places. We unfortunately embed that knowledge here. if (opcode != CEE_CALLI) { _impResolveToken(CORINFO_TOKENKIND_Method); eeGetCallInfo(&resolvedToken, (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr, // this is how impImportCall invokes getCallInfo combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT : CORINFO_CALLINFO_NONE), &callInfo); } else { // Suppress uninitialized use warning. memset(&resolvedToken, 0, sizeof(resolvedToken)); memset(&callInfo, 0, sizeof(callInfo)); resolvedToken.token = getU4LittleEndian(codeAddr); resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; } CALL: // memberRef should be set. // newObjThisPtr should be set for CEE_NEWOBJ JITDUMP(" %08X", resolvedToken.token); constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0; bool newBBcreatedForTailcallStress; bool passedStressModeValidation; newBBcreatedForTailcallStress = false; passedStressModeValidation = true; if (compIsForInlining()) { if (compDonotInline()) { return; } // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks. assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0); } else { if (compTailCallStress()) { // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()? // Tail call stress only recognizes call+ret patterns and forces them to be // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress // doesn't import 'ret' opcode following the call into the basic block containing // the call instead imports it to a new basic block. Note that fgMakeBasicBlocks() // is already checking that there is an opcode following call and hence it is // safe here to read next opcode without bounds check. newBBcreatedForTailcallStress = impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't // make it jump to RET. (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT); if (newBBcreatedForTailcallStress && !hasTailPrefix) { // Do a more detailed evaluation of legality const bool returnFalseIfInvalid = true; const bool passedConstraintCheck = verCheckTailCallConstraint(opcode, &resolvedToken, constraintCall ? 
&constrainedResolvedToken : nullptr, returnFalseIfInvalid); if (passedConstraintCheck) { // Now check with the runtime CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod; bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) || (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE); CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd; if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd, hasTailPrefix)) // Is it legal to do tailcall? { // Stress the tailcall. JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)"); prefixFlags |= PREFIX_TAILCALL_EXPLICIT; prefixFlags |= PREFIX_TAILCALL_STRESS; } else { // Runtime disallows this tail call JITDUMP(" (Tailcall stress: runtime preventing tailcall)"); passedStressModeValidation = false; } } else { // Constraints disallow this tail call JITDUMP(" (Tailcall stress: constraint check failed)"); passedStressModeValidation = false; } } } } // This is split up to avoid goto flow warnings. bool isRecursive; isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd); // If we've already disqualified this call as a tail call under tail call stress, // don't consider it for implicit tail calling either. // // When not running under tail call stress, we may mark this call as an implicit // tail call candidate. We'll do an "equivalent" validation during impImportCall. // // Note that when running under tail call stress, a call marked as explicit // tail prefixed will not be considered for implicit tail calling. if (passedStressModeValidation && impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive)) { if (compIsForInlining()) { #if FEATURE_TAILCALL_OPT_SHARED_RETURN // Are we inlining at an implicit tail call site? If so the we can flag // implicit tail call sites in the inline body. These call sites // often end up in non BBJ_RETURN blocks, so only flag them when // we're able to handle shared returns. if (impInlineInfo->iciCall->IsImplicitTailCall()) { JITDUMP("\n (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN } else { JITDUMP("\n (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)"); prefixFlags |= PREFIX_TAILCALL_IMPLICIT; } } // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call). explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0; readonlyCall = (prefixFlags & PREFIX_READONLY) != 0; if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ) { // All calls and delegates need a security callout. // For delegates, this is the call to the delegate constructor, not the access check on the // LD(virt)FTN. impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper); } callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr, newObjThisPtr, prefixFlags, &callInfo, opcodeOffs); if (compDonotInline()) { // We do not check fails after lvaGrabTemp. It is covered with CoreCLR_13272 issue. assert((callTyp == TYP_UNDEF) || (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS)); return; } if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we // have created a new BB after the "call" // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless. 
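// Either way the shared RET handling below finishes off this block, so the
// explicit tail call and the stress-forced one take the same path from here.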
{ assert(!compIsForInlining()); goto RET; } break; case CEE_LDFLD: case CEE_LDSFLD: case CEE_LDFLDA: case CEE_LDSFLDA: { bool isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA); bool isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA); /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; CORINFO_CLASS_HANDLE objType = nullptr; // used for fields if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA) { tiObj = &impStackTop().seTypeInfo; StackEntry se = impPopStack(); objType = se.seTypeInfo.GetClassHandle(); obj = se.val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; clsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER); return; default: break; } if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT && clsHnd) { if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) && !(info.compFlags & CORINFO_FLG_FORCEINLINE)) { // Loading a static valuetype field usually will cause a JitHelper to be called // for the static base. This will bloat the code. compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS); if (compInlineResult->IsFailure()) { return; } } } } tiRetVal = verMakeTypeInfo(ciType, clsHnd); if (isLoadAddress) { tiRetVal.MakeByRef(); } else { tiRetVal.NormaliseForStack(); } // Perform this check always to ensure that we get field access exceptions even with // SkipVerification. impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static load accesses non-static field if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj. if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } bool usesHelper = false; switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { // If the object is a struct, what we really want is // for the field to operate on the address of the struct. 
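// For example, with "struct S { int F; }" and an S value on the IL stack, "ldfld S::F"
// must read from the struct's address, so a non-GC struct value is rewritten to its
// address (via impGetStructAddr) before the GT_FIELD node is built below.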
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj)) { assert(opcode == CEE_LDFLD && objType != nullptr); obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true); } /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If the object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } // wrap it in a address of operator if necessary if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1); } else { if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation if (isLoadAddress) { op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1); } break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, nullptr); usesHelper = true; break; case CORINFO_FIELD_STATIC_ADDRESS: // Replace static read-only fields with constant if possible if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) && !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) && (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp))) { CorInfoInitClassResult initClassResult = info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd, impTokenLookupContextHandle); if (initClassResult & CORINFO_INITCLASS_INITIALIZED) { void** pFldAddr = nullptr; void* fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr); // We should always be able to access this static's address directly // assert(pFldAddr == nullptr); op1 = impImportStaticReadOnlyField(fldAddr, lclTyp); // Widen small types since we're propagating the value // instead of producing an indir. 
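// For example, a "static readonly byte" field folded to a constant is pushed as a
// TYP_INT constant here, matching the usual widening of small types on the stack.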
// op1->gtType = genActualType(lclTyp); goto FIELD_DONE; } } FALLTHROUGH; case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; case CORINFO_FIELD_INTRINSIC_ZERO: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); op1 = gtNewIconNode(0, lclTyp); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_EMPTY_STRING: { assert(aflags & CORINFO_ACCESS_GET); // Import String.Empty as "" (GT_CNS_STR with a fake SconCPX = 0) op1 = gtNewSconNode(EMPTY_STRING_SCON, nullptr); goto FIELD_DONE; } break; case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN: { assert(aflags & CORINFO_ACCESS_GET); // Widen to stack type lclTyp = genActualType(lclTyp); #if BIGENDIAN op1 = gtNewIconNode(0, lclTyp); #else op1 = gtNewIconNode(1, lclTyp); #endif goto FIELD_DONE; } break; default: assert(!"Unexpected fieldAccessor"); } if (!isLoadAddress) { if (prefixFlags & PREFIX_VOLATILE) { op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_VOLATILE; } } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { if (!usesHelper) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) || (op1->OperGet() == GT_OBJ)); op1->gtFlags |= GTF_IND_UNALIGNED; } } } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } FIELD_DONE: impPushOnStack(op1, tiRetVal); } break; case CEE_STFLD: case CEE_STSFLD: { bool isStoreStatic = (opcode == CEE_STSFLD); CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type) /* Get the CP_Fieldref index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Field); JITDUMP(" %08X", resolvedToken.token); int aflags = CORINFO_ACCESS_SET; GenTree* obj = nullptr; typeInfo* tiObj = nullptr; typeInfo tiVal; /* Pull the value from the stack */ StackEntry se = impPopStack(); op2 = se.val; tiVal = se.seTypeInfo; clsHnd = tiVal.GetClassHandle(); if (opcode == CEE_STFLD) { tiObj = &impStackTop().seTypeInfo; obj = impPopStack().val; if (impIsThis(obj)) { aflags |= CORINFO_ACCESS_THIS; } } eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo); // Figure out the type of the member. We always call canAccessField, so you always need this // handle CorInfoType ciType = fieldInfo.fieldType; fieldClsHnd = fieldInfo.structType; lclTyp = JITtype2varType(ciType); if (compIsForInlining()) { /* Is this a 'special' (COM) field? or a TLS ref static field?, field stored int GC heap? or * per-inst static? 
*/ switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_STATIC_TLS: compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER); return; case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: /* We may be able to inline the field accessors in specific instantiations of generic * methods */ compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER); return; default: break; } } impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper); // Raise InvalidProgramException if static store accesses non-static field if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0)) { BADCODE("static access on an instance field"); } // We are using stfld on a static field. // We allow it, but need to eval any side-effects for obj if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr) { if (obj->gtFlags & GTF_SIDE_EFFECT) { obj = gtUnusedValNode(obj); impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); } obj = nullptr; } /* Preserve 'small' int types */ if (!varTypeIsSmall(lclTyp)) { lclTyp = genActualType(lclTyp); } switch (fieldInfo.fieldAccessor) { case CORINFO_FIELD_INSTANCE: #ifdef FEATURE_READYTORUN case CORINFO_FIELD_INSTANCE_WITH_BASE: #endif { /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset); DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass); if (StructHasOverlappingFields(typeFlags)) { op1->AsField()->gtFldMayOverlap = true; } #ifdef FEATURE_READYTORUN if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE) { op1->AsField()->gtFieldLookup = fieldInfo.fieldLookup; } #endif op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT); if (fgAddrCouldBeNull(obj)) { op1->gtFlags |= GTF_EXCEPT; } // If object is a BYREF then our target is a value class and // it could point anywhere, example a boxed class static int if (obj->gtType == TYP_BYREF) { op1->gtFlags |= GTF_IND_TGTANYWHERE; } if (compIsForInlining() && impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, nullptr, obj, impInlineInfo->inlArgInfo)) { impInlineInfo->thisDereferencedFirst = true; } } break; case CORINFO_FIELD_STATIC_TLS: #ifdef TARGET_X86 // Legacy TLS access is implemented as intrinsic on x86 only /* Create the data member node */ op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset); op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation break; #else fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER; FALLTHROUGH; #endif case CORINFO_FIELD_STATIC_ADDR_HELPER: case CORINFO_FIELD_INSTANCE_HELPER: case CORINFO_FIELD_INSTANCE_ADDR_HELPER: op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp, clsHnd, op2); goto SPILL_APPEND; case CORINFO_FIELD_STATIC_ADDRESS: case CORINFO_FIELD_STATIC_RVA_ADDRESS: case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER: case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER: case CORINFO_FIELD_STATIC_READYTORUN_HELPER: op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp); break; default: assert(!"Unexpected fieldAccessor"); } // Create the member assignment, unless we have a TYP_STRUCT. 
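// Scalar stores are turned into a GT_ASG immediately below; TYP_STRUCT stores are
// deferred and built with impAssignStruct after the interference spills further down.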
bool deferStructAssign = (lclTyp == TYP_STRUCT); if (!deferStructAssign) { if (prefixFlags & PREFIX_VOLATILE) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered op1->gtFlags |= GTF_IND_VOLATILE; } if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp)) { assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND)); op1->gtFlags |= GTF_IND_UNALIGNED; } /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during importation and reads from the union as if it were a long during code generation. Though this can potentially read garbage, one can get lucky to have this working correctly. This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a dependency on it. To be backward compatible, we will explicitly add an upward cast here so that it works correctly always. Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT for V4.0. */ CLANG_FORMAT_COMMENT_ANCHOR; #ifndef TARGET_64BIT // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be // generated for ARM as well as x86, so the following IR will be accepted: // STMTx (IL 0x... ???) // * ASG long // +--* CLS_VAR long // \--* CNS_INT int 2 if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) && varTypeIsLong(op1->TypeGet())) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } #endif #ifdef TARGET_64BIT // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType)) { op2->gtType = TYP_I_IMPL; } else { // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity // if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT)) { op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT); } // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity // if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT)) { op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL); } } #endif // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE // We insert a cast to the dest 'op1' type // if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)) { op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet()); } op1 = gtNewAssignNode(op1, op2); /* Mark the expression as containing an assignment */ op1->gtFlags |= GTF_ASG; } /* Check if the class needs explicit initialization */ if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS) { GenTree* helperNode = impInitClass(&resolvedToken); if (compDonotInline()) { return; } if (helperNode != nullptr) { op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1); } } /* stfld can interfere with value classes (consider the sequence ldloc, ldloca, ..., stfld, stloc). We will be conservative and spill all value class references from the stack. */ if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL))) { assert(tiObj); // If we can resolve the field to be within some local, // then just spill that local. 
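// For example, for "ldloc.0; ldloca.0; ...; stfld ...; stloc.0" the store through the
// byref writes into V00, so stack entries reading V00 must be spilled before the stfld
// tree is appended; when the destination local is known we spill only its uses.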
// GenTreeLclVarCommon* const lcl = obj->IsLocalAddrExpr(); if (lcl != nullptr) { impSpillLclRefs(lcl->GetLclNum()); } else if (impIsValueType(tiObj)) { impSpillEvalStack(); } else { impSpillValueClasses(); } } /* Spill any refs to the same member from the stack */ impSpillLclRefs((ssize_t)resolvedToken.hField); /* stsfld also interferes with indirect accesses (for aliased statics) and calls. But don't need to spill other statics as we have explicitly spilled this particular static field. */ impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD")); if (deferStructAssign) { op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL); } } goto APPEND; case CEE_NEWARR: { /* Get the class type index operand */ _impResolveToken(CORINFO_TOKENKIND_Newarr); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } tiRetVal = verMakeTypeInfo(resolvedToken.hClass); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); /* Form the arglist: array class handle, size */ op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); #ifdef TARGET_64BIT // The array helper takes a native int for array length. // So if we have an int, explicitly extend it to be a native int. if (genActualType(op2->TypeGet()) != TYP_I_IMPL) { if (op2->IsIntegralConst()) { op2->gtType = TYP_I_IMPL; } else { bool isUnsigned = false; op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL); } } #endif // TARGET_64BIT #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF, gtNewCallArgs(op2)); usingReadyToRunHelper = (op1 != nullptr); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the newarr call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub // 3) Allocate the new array // Reason: performance (today, we'll always use the slow helper for the R2R generics case) // Need to restore array classes before creating array objects on the heap op1 = impTokenToHandle(&resolvedToken, nullptr, true /*mustRestoreHandle*/); if (op1 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { GenTreeCall::Use* args = gtNewCallArgs(op1, op2); /* Create a call to 'new' */ // Note that this only works for shared generic code because the same helper is used for all // reference array types op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args); } op1->AsCall()->compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass; /* Remember that this basic block contains 'new' of an sd array */ block->bbFlags |= BBF_HAS_NEWARRAY; optMethodFlags |= OMF_HAS_NEWARRAY; /* Push the result of the call on the stack */ impPushOnStack(op1, tiRetVal); callTyp = TYP_REF; } break; case CEE_LOCALLOC: // We don't allow locallocs inside handlers if (block->hasHndIndex()) { BADCODE("Localloc can't be inside handler"); } // Get the size to allocate op2 = impPopStack().val; assertImp(genActualTypeIsIntOrI(op2->gtType)); 
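// ECMA-335 requires that the evaluation stack hold only the size operand when localloc
// executes and forbids localloc inside exception handlers; both rules are enforced with
// BADCODE around here.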
if (verCurrentState.esStackDepth != 0) { BADCODE("Localloc can only be used when the stack is empty"); } // If the localloc is not in a loop and its size is a small constant, // create a new local var of TYP_BLK and return its address. { bool convertedToLocal = false; // Need to aggressively fold here, as even fixed-size locallocs // will have casts in the way. op2 = gtFoldExpr(op2); if (op2->IsIntegralConst()) { const ssize_t allocSize = op2->AsIntCon()->IconValue(); bool bbInALoop = impBlockIsInALoop(block); if (allocSize == 0) { // Result is nullptr JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n"); op1 = gtNewIconNode(0, TYP_I_IMPL); convertedToLocal = true; } else if ((allocSize > 0) && !bbInALoop) { // Get the size threshold for local conversion ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE; #ifdef DEBUG // Optionally allow this to be modified maxSize = JitConfig.JitStackAllocToLocalSize(); #endif // DEBUG if (allocSize <= maxSize) { const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal")); JITDUMP("Converting stackalloc of %zd bytes to new local V%02u\n", allocSize, stackallocAsLocal); lvaTable[stackallocAsLocal].lvType = TYP_BLK; lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize; lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true; op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK); op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1); convertedToLocal = true; if (!this->opts.compDbgEnC) { // Ensure we have stack security for this method. // Reorder layout since the converted localloc is treated as an unsafe buffer. setNeedsGSSecurityCookie(); compGSReorderStackLayout = true; } } } } if (!convertedToLocal) { // Bail out if inlining and the localloc was not converted. // // Note we might consider allowing the inline, if the call // site is not in a loop. if (compIsForInlining()) { InlineObservation obs = op2->IsIntegralConst() ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN; compInlineResult->NoteFatal(obs); return; } op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2); // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd. op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE); // Ensure we have stack security for this method. setNeedsGSSecurityCookie(); /* The FP register may not be back to the original value at the end of the method, even if the frame size is 0, as localloc may have modified it. So we will HAVE to reset it */ compLocallocUsed = true; } else { compLocallocOptimized = true; } } impPushOnStack(op1, tiRetVal); break; case CEE_ISINST: { /* Get the type token */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? 
opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the isinstanceof_any call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Perform the 'is instance' check on the input object // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false, opcodeOffs); } if (compDonotInline()) { return; } impPushOnStack(op1, tiRetVal); } break; } case CEE_REFANYVAL: // get the class handle and make a ICON node out of it _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); // Call helper GETREFANY(classHandle, op1); op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, gtNewCallArgs(op2, op1)); impPushOnStack(op1, tiRetVal); break; case CEE_REFANYTYPE: op1 = impPopStack().val; // make certain it is normalized; op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL); if (op1->gtOper == GT_OBJ) { // Get the address of the refany op1 = op1->AsOp()->gtOp1; // Fetch the type from the correct slot op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL)); op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1); } else { assertImp(op1->gtOper == GT_MKREFANY); // The pointer may have side-effects if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT) { impAppendTree(op1->AsOp()->gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); #ifdef DEBUG impNoteLastILoffs(); #endif } // We already have the class handle op1 = op1->AsOp()->gtOp2; } // convert native TypeHandle to RuntimeTypeHandle { GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT, helperArgs); CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass(); // The handle struct is returned in register op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); op1->AsCall()->gtRetClsHnd = classHandle; #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv()); #endif tiRetVal = typeInfo(TI_STRUCT, classHandle); } impPushOnStack(op1, tiRetVal); break; case CEE_LDTOKEN: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); lastLoadToken = codeAddr; _impResolveToken(CORINFO_TOKENKIND_Ldtoken); tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken); op1 = impTokenToHandle(&resolvedToken, nullptr, true); if (op1 == nullptr) { // compDonotInline() return; } helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE; assert(resolvedToken.hClass != nullptr); if (resolvedToken.hMethod != nullptr) { helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD; } else if (resolvedToken.hField != nullptr) { helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD; } GenTreeCall::Use* helperArgs = gtNewCallArgs(op1); op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs); // The handle struct is returned in register and // it could be 
consumed both as `TYP_STRUCT` and `TYP_REF`. op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType(); #if FEATURE_MULTIREG_RET op1->AsCall()->InitializeStructReturnType(this, tokenType, op1->AsCall()->GetUnmanagedCallConv()); #endif op1->AsCall()->gtRetClsHnd = tokenType; tiRetVal = verMakeTypeInfo(tokenType); impPushOnStack(op1, tiRetVal); } break; case CEE_UNBOX: case CEE_UNBOX_ANY: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); bool runtimeLookup; op2 = impTokenToHandle(&resolvedToken, &runtimeLookup); if (op2 == nullptr) { assert(compDonotInline()); return; } // Run this always so we can get access exceptions even with SkipVerification. accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n"); op1 = impPopStack().val; goto CASTCLASS; } /* Pop the object and create the unbox helper call */ /* You might think that for UNBOX_ANY we need to push a different */ /* (non-byref) type, but here we're making the tiRetVal that is used */ /* for the intermediate pointer which we then transfer onto the OBJ */ /* instruction. OBJ then creates the appropriate tiRetVal. */ op1 = impPopStack().val; assertImp(op1->gtType == TYP_REF); helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass); assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE); // Check legality and profitability of inline expansion for unboxing. const bool canExpandInline = (helper == CORINFO_HELP_UNBOX); const bool shouldExpandInline = !compCurBB->isRunRarely() && opts.OptimizationEnabled(); if (canExpandInline && shouldExpandInline) { // See if we know anything about the type of op1, the object being unboxed. bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE clsHnd = gtGetClassHandle(op1, &isExact, &isNonNull); // We can skip the "exact" bit here as we are comparing to a value class. // compareTypesForEquality should bail on comparisions for shared value classes. if (clsHnd != NO_CLASS_HANDLE) { const TypeCompareState compare = info.compCompHnd->compareTypesForEquality(resolvedToken.hClass, clsHnd); if (compare == TypeCompareState::Must) { JITDUMP("\nOptimizing %s (%s) -- type test will succeed\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", eeGetClassName(clsHnd)); // For UNBOX, null check (if necessary), and then leave the box payload byref on the stack. 
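// Roughly: push COMMA(NULLCHECK(obj), ADD(obj, TARGET_POINTER_SIZE)) as a TYP_BYREF,
// i.e. the address just past the box's method table pointer.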
if (opcode == CEE_UNBOX) { GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("optimized unbox clone")); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset); GenTree* nullcheck = gtNewNullCheck(op1, block); GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress); impPushOnStack(result, tiRetVal); break; } // For UNBOX.ANY load the struct from the box payload byref (the load will nullcheck) assert(opcode == CEE_UNBOX_ANY); GenTree* boxPayloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* boxPayloadAddress = gtNewOperNode(GT_ADD, TYP_BYREF, op1, boxPayloadOffset); impPushOnStack(boxPayloadAddress, tiRetVal); oper = GT_OBJ; goto OBJ; } else { JITDUMP("\nUnable to optimize %s -- can't resolve type comparison\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); } } else { JITDUMP("\nUnable to optimize %s -- class for [%06u] not known\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", dspTreeID(op1)); } JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY"); // we are doing normal unboxing // inline the common case of the unbox helper // UNBOX(exp) morphs into // clone = pop(exp); // ((*clone == typeToken) ? nop : helper(clone, typeToken)); // push(clone + TARGET_POINTER_SIZE) // GenTree* cloneOperand; op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone1")); op1 = gtNewMethodTableLookup(op1); GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2); op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("inline UNBOX clone2")); op2 = impTokenToHandle(&resolvedToken); if (op2 == nullptr) { // compDonotInline() return; } op1 = gtNewHelperCallNode(helper, TYP_VOID, gtNewCallArgs(op2, op1)); op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1); op1 = gtNewQmarkNode(TYP_VOID, condBox, op1->AsColon()); // QMARK nodes cannot reside on the evaluation stack. Because there // may be other trees on the evaluation stack that side-effect the // sources of the UNBOX operation we must spill the stack. impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtDI); // Create the address-expression to reference past the object header // to the beginning of the value-type. Today this means adjusting // past the base of the objects vtable field which is pointer sized. op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2); } else { JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY", canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal"); // Don't optimize, just call the helper and be done with it op1 = gtNewHelperCallNode(helper, (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), gtNewCallArgs(op2, op1)); if (op1->gtType == TYP_STRUCT) { op1->AsCall()->gtRetClsHnd = resolvedToken.hClass; } } assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref. (helper == CORINFO_HELP_UNBOX_NULLABLE && varTypeIsStruct(op1)) // UnboxNullable helper returns a struct. 
); /* ---------------------------------------------------------------------- | \ helper | | | | \ | | | | \ | CORINFO_HELP_UNBOX | CORINFO_HELP_UNBOX_NULLABLE | | \ | (which returns a BYREF) | (which returns a STRUCT) | | | opcode \ | | | |--------------------------------------------------------------------- | UNBOX | push the BYREF | spill the STRUCT to a local, | | | | push the BYREF to this local | |--------------------------------------------------------------------- | UNBOX_ANY | push a GT_OBJ of | push the STRUCT | | | the BYREF | For Linux when the | | | | struct is returned in two | | | | registers create a temp | | | | which address is passed to | | | | the unbox_nullable helper. | |--------------------------------------------------------------------- */ if (opcode == CEE_UNBOX) { if (helper == CORINFO_HELP_UNBOX_NULLABLE) { // Unbox nullable helper returns a struct type. // We need to spill it to a temp so than can take the address of it. // Here we need unsafe value cls check, since the address of struct is taken to be used // further along and potetially be exploitable. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable")); lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); } assert(op1->gtType == TYP_BYREF); } else { assert(opcode == CEE_UNBOX_ANY); if (helper == CORINFO_HELP_UNBOX) { // Normal unbox helper returns a TYP_BYREF. impPushOnStack(op1, tiRetVal); oper = GT_OBJ; goto OBJ; } assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!"); #if FEATURE_MULTIREG_RET if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass, CorInfoCallConvExtension::Managed)) { // Unbox nullable helper returns a TYP_STRUCT. // For the multi-reg case we need to spill it to a temp so that // we can pass the address to the unbox_nullable jit helper. unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable")); lvaTable[tmp].lvIsMultiRegArg = true; lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */); op2 = gtNewLclvNode(tmp, TYP_STRUCT); op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp. op2 = gtNewLclvNode(tmp, TYP_STRUCT); op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2); op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2); // In this case the return value of the unbox helper is TYP_BYREF. // Make sure the right type is placed on the operand type stack. impPushOnStack(op1, tiRetVal); // Load the struct. oper = GT_OBJ; assert(op1->gtType == TYP_BYREF); goto OBJ; } else #endif // !FEATURE_MULTIREG_RET { // If non register passable struct we have it materialized in the RetBuf. 
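// That is, the UNBOX_NULLABLE helper wrote its result through the hidden return buffer,
// so the helper call is already a TYP_STRUCT value that can be pushed directly.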
assert(op1->gtType == TYP_STRUCT); tiRetVal = verMakeTypeInfo(resolvedToken.hClass); assert(tiRetVal.IsValueClass()); } } impPushOnStack(op1, tiRetVal); } break; case CEE_BOX: { /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Box); JITDUMP(" %08X", resolvedToken.token); accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); // Note BOX can be used on things that are not value classes, in which // case we get a NOP. However the verifier's view of the type on the // stack changes (in generic code a 'T' becomes a 'boxed T') if (!eeIsValueClass(resolvedToken.hClass)) { JITDUMP("\n Importing BOX(refClass) as NOP\n"); verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal; break; } // Look ahead for box idioms int matched = impBoxPatternMatch(&resolvedToken, codeAddr + sz, codeEndp); if (matched >= 0) { // Skip the matched IL instructions sz += matched; break; } impImportAndPushBox(&resolvedToken); if (compDonotInline()) { return; } } break; case CEE_SIZEOF: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass)); impPushOnStack(op1, tiRetVal); break; case CEE_CASTCLASS: /* Get the Class index */ assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Casting); JITDUMP(" %08X", resolvedToken.token); if (!opts.IsReadyToRun()) { op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; /* Pop the address and create the 'checked cast' helper call */ // At this point we expect typeRef to contain the token, op1 to contain the value being cast, // and op2 to contain code that creates the type handle corresponding to typeRef CASTCLASS: { GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true); if (optTree != nullptr) { impPushOnStack(optTree, tiRetVal); } else { #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF, gtNewCallArgs(op1)); usingReadyToRunHelper = (opLookup != nullptr); op1 = (usingReadyToRunHelper ? opLookup : op1); if (!usingReadyToRunHelper) { // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call // and the chkcastany call with a single call to a dynamic R2R cell that will: // 1) Load the context // 2) Perform the generic dictionary lookup and caching, and generate the appropriate // stub // 3) Check the object on the stack for the type-cast // Reason: performance (today, we'll always use the slow helper for the R2R generics case) op2 = impTokenToHandle(&resolvedToken, nullptr, false); if (op2 == nullptr) { // compDonotInline() return; } } } if (!usingReadyToRunHelper) #endif { op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true, opcodeOffs); } if (compDonotInline()) { return; } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); } } break; case CEE_THROW: // Any block with a throw is rarely executed. 
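// Marking it run-rarely keeps block weights (and downstream optimization effort)
// focused on the non-throwing paths.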
block->bbSetRunRarely(); // Pop the exception object and create the 'throw' helper call op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewCallArgs(impPopStack().val)); // Fall through to clear out the eval stack. EVAL_APPEND: if (verCurrentState.esStackDepth > 0) { impEvalSideEffects(); } assert(verCurrentState.esStackDepth == 0); goto APPEND; case CEE_RETHROW: assert(!compIsForInlining()); if (info.compXcptnsCount == 0) { BADCODE("rethrow outside catch"); } /* Create the 'rethrow' helper call */ op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID); goto EVAL_APPEND; case CEE_INITOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = gtNewIconNode(0); // Value op1 = impPopStack().val; // Dest if (eeIsValueClass(resolvedToken.hClass)) { op1 = gtNewStructVal(resolvedToken.hClass, op1); if (op1->OperIs(GT_OBJ)) { gtSetObjGcInfo(op1->AsObj()); } } else { size = info.compCompHnd->getClassSize(resolvedToken.hClass); assert(size == TARGET_POINTER_SIZE); op1 = gtNewBlockVal(op1, size); } op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); goto SPILL_APPEND; case CEE_INITBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Value op1 = impPopStack().val; // Dst addr if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false); } else { if (!op2->IsIntegralConst(0)) { op2 = gtNewOperNode(GT_INIT_VAL, TYP_INT, op2); } op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPBLK: op3 = impPopStack().val; // Size op2 = impPopStack().val; // Src addr op1 = impPopStack().val; // Dst addr if (op2->OperGet() == GT_ADDR) { op2 = op2->AsOp()->gtOp1; } else { op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2); } if (op3->IsCnsIntOrI()) { size = (unsigned)op3->AsIntConCommon()->IconValue(); op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size)); op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, true); } else { op1 = new (this, GT_STORE_DYN_BLK) GenTreeStoreDynBlk(op1, op2, op3); size = 0; if ((prefixFlags & PREFIX_VOLATILE) != 0) { op1->gtFlags |= GTF_BLK_VOLATILE; } } goto SPILL_APPEND; case CEE_CPOBJ: assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (!eeIsValueClass(resolvedToken.hClass)) { op1 = impPopStack().val; // address to load from impBashVarAddrsToI(op1); assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF); op1 = gtNewOperNode(GT_IND, TYP_REF, op1); op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF; impPushOnStack(op1, typeInfo()); opcode = CEE_STIND_REF; lclTyp = TYP_REF; goto STIND; } op2 = impPopStack().val; // Src op1 = impPopStack().val; // Dest op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0)); goto SPILL_APPEND; case CEE_STOBJ: { assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; } if (lclTyp == TYP_REF) { opcode = CEE_STIND_REF; goto STIND; } CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if 
(impIsPrimitive(jitTyp)) { lclTyp = JITtype2varType(jitTyp); goto STIND; } op2 = impPopStack().val; // Value op1 = impPopStack().val; // Ptr assertImp(varTypeIsStruct(op2)); op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL); if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED)) { op1->gtFlags |= GTF_BLK_UNALIGNED; } goto SPILL_APPEND; } case CEE_MKREFANY: assert(!compIsForInlining()); // Being lazy here. Refanys are tricky in terms of gc tracking. // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany. JITDUMP("disabling struct promotion because of mkrefany\n"); fgNoStructPromotion = true; oper = GT_MKREFANY; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); op2 = impTokenToHandle(&resolvedToken, nullptr, true); if (op2 == nullptr) { // compDonotInline() return; } accessAllowedResult = info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper); impHandleAccessAllowed(accessAllowedResult, &calloutHelper); op1 = impPopStack().val; // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec. // But JIT32 allowed it, so we continue to allow it. assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT); // MKREFANY returns a struct. op2 is the class token. op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2); impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass())); break; case CEE_LDOBJ: { oper = GT_OBJ; assertImp(sz == sizeof(unsigned)); _impResolveToken(CORINFO_TOKENKIND_Class); JITDUMP(" %08X", resolvedToken.token); OBJ: tiRetVal = verMakeTypeInfo(resolvedToken.hClass); if (eeIsValueClass(resolvedToken.hClass)) { lclTyp = TYP_STRUCT; } else { lclTyp = TYP_REF; opcode = CEE_LDIND_REF; goto LDIND; } op1 = impPopStack().val; assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL); CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass); if (impIsPrimitive(jitTyp)) { op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1); // Could point anywhere, example a boxed class static int op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF; assertImp(varTypeIsArithmetic(op1->gtType)); } else { // OBJ returns a struct // and an inline argument which is the class token of the loaded obj op1 = gtNewObjNode(resolvedToken.hClass, op1); } op1->gtFlags |= GTF_EXCEPT; if (prefixFlags & PREFIX_UNALIGNED) { op1->gtFlags |= GTF_IND_UNALIGNED; } impPushOnStack(op1, tiRetVal); break; } case CEE_LDLEN: op1 = impPopStack().val; if (opts.OptimizationEnabled()) { /* Use GT_ARR_LENGTH operator so rng check opts see this */ GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length, block); op1 = arrLen; } else { /* Create the expression "*(array_addr + ArrLenOffs)" */ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL)); op1 = gtNewIndir(TYP_INT, op1); } /* Push the result back on the stack */ impPushOnStack(op1, tiRetVal); break; case CEE_BREAK: op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID); goto SPILL_APPEND; case CEE_NOP: if (opts.compDbgCode) { op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID); goto SPILL_APPEND; } break; /******************************** NYI *******************************/ case 0xCC: OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n"); FALLTHROUGH; case CEE_ILLEGAL: case CEE_MACRO_END: default: if (compIsForInlining()) 
{ compInlineResult->NoteFatal(InlineObservation::CALLEE_COMPILATION_ERROR); return; } BADCODE3("unknown opcode", ": %02X", (int)opcode); } codeAddr += sz; prevOpcode = opcode; prefixFlags = 0; } return; #undef _impResolveToken } #ifdef _PREFAST_ #pragma warning(pop) #endif // Push a local/argument treeon the operand stack void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal) { tiRetVal.NormaliseForStack(); if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr()) { tiRetVal.SetUninitialisedObjRef(); } impPushOnStack(op, tiRetVal); } //------------------------------------------------------------------------ // impCreateLocal: create a GT_LCL_VAR node to access a local that might need to be normalized on load // // Arguments: // lclNum -- The index into lvaTable // offset -- The offset to associate with the node // // Returns: // The node // GenTreeLclVar* Compiler::impCreateLocalNode(unsigned lclNum DEBUGARG(IL_OFFSET offset)) { var_types lclTyp; if (lvaTable[lclNum].lvNormalizeOnLoad()) { lclTyp = lvaGetRealType(lclNum); } else { lclTyp = lvaGetActualType(lclNum); } return gtNewLclvNode(lclNum, lclTyp DEBUGARG(offset)); } // Load a local/argument on the operand stack // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, const typeInfo& tiRetVal) { impPushVar(impCreateLocalNode(lclNum DEBUGARG(offset)), tiRetVal); } // Load an argument on the operand stack // Shared by the various CEE_LDARG opcodes // ilArgNum is the argument index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset) { Verify(ilArgNum < info.compILargsCount, "bad arg num"); if (compIsForInlining()) { if (ilArgNum >= info.compArgsCount) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER); return; } impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo), impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo); } else { if (ilArgNum >= info.compArgsCount) { BADCODE("Bad IL"); } unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param if (lclNum == info.compThisArg) { lclNum = lvaArg0Var; } impLoadVar(lclNum, offset); } } // Load a local on the operand stack // Shared by the various CEE_LDLOC opcodes // ilLclNum is the local index as specified in IL. // It will be mapped to the correct lvaTable index void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset) { if (compIsForInlining()) { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER); return; } // Get the local type var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo; typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo; /* Have we allocated a temp for this local? 
*/ unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp")); // All vars of inlined methods should be !lvNormalizeOnLoad() assert(!lvaTable[lclNum].lvNormalizeOnLoad()); lclTyp = genActualType(lclTyp); impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal); } else { if (ilLclNum >= info.compMethodInfo->locals.numArgs) { BADCODE("Bad IL"); } unsigned lclNum = info.compArgsCount + ilLclNum; impLoadVar(lclNum, offset); } } #ifdef TARGET_ARM /************************************************************************************** * * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the * dst struct, because struct promotion will turn it into a float/double variable while * the rhs will be an int/long variable. We don't code generate assignment of int into * a float, but there is nothing that might prevent us from doing so. The tree however * would like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int)) * * tmpNum - the lcl dst variable num that is a struct. * src - the src tree assigned to the dest that is a struct/int (when varargs call.) * hClass - the type handle for the struct variable. * * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play, * however, we could do a codegen of transferring from int to float registers * (transfer, not a cast.) * */ void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass) { if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass)) { int hfaSlots = GetHfaCount(hClass); var_types hfaType = GetHfaType(hClass); // If we have varargs we morph the method's return type to be "int" irrespective of its original // type: struct/float at importer because the ABI calls out return in integer registers. // We don't want struct promotion to replace an expression like this: // lclFld_int = callvar_int() into lclFld_float = callvar_int(); // This means an int is getting assigned to a float without a cast. Prevent the promotion. if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) || (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES)) { // Make sure this struct type stays as struct so we can receive the call in a struct. lvaTable[tmpNum].lvIsMultiRegRet = true; } } } #endif // TARGET_ARM #if FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impAssignMultiRegTypeToVar: ensure calls that return structs in multiple // registers return values to suitable temps. // // Arguments: // op -- call returning a struct in registers // hClass -- class handle for struct // // Returns: // Tree with reference to struct local to use as call return value. GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass DEBUGARG(CorInfoCallConvExtension callConv)) { unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return")); impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL); GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType); // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns. ret->gtFlags |= GTF_DONT_CSE; assert(IsMultiRegReturnedType(hClass, callConv)); // Mark the var so that fields are not promoted and stay together. 
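// (lvIsMultiRegRet is what keeps the fields from being promoted apart, so the
// multi-register call return can be stored into the temp as a single unit.)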
lvaTable[tmpNum].lvIsMultiRegRet = true; return ret; } #endif // FEATURE_MULTIREG_RET //------------------------------------------------------------------------ // impReturnInstruction: import a return or an explicit tail call // // Arguments: // prefixFlags -- active IL prefixes // opcode -- [in, out] IL opcode // // Returns: // True if import was successful (may fail for some inlinees) // bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode) { const bool isTailCall = (prefixFlags & PREFIX_TAILCALL) != 0; #ifdef DEBUG // If we are importing an inlinee and have GC ref locals we always // need to have a spill temp for the return value. This temp // should have been set up in advance, over in fgFindBasicBlocks. if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID)) { assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM); } #endif // DEBUG GenTree* op2 = nullptr; GenTree* op1 = nullptr; CORINFO_CLASS_HANDLE retClsHnd = nullptr; if (info.compRetType != TYP_VOID) { StackEntry se = impPopStack(); retClsHnd = se.seTypeInfo.GetClassHandle(); op2 = se.val; if (!compIsForInlining()) { impBashVarAddrsToI(op2); op2 = impImplicitIorI4Cast(op2, info.compRetType); op2 = impImplicitR4orR8Cast(op2, info.compRetType); // Note that we allow TYP_I_IMPL<->TYP_BYREF transformation, but only TYP_I_IMPL<-TYP_REF. assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) || ((op2->TypeGet() == TYP_I_IMPL) && TypeIs(info.compRetType, TYP_BYREF)) || (op2->TypeIs(TYP_BYREF, TYP_REF) && (info.compRetType == TYP_I_IMPL)) || (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) || (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType))); #ifdef DEBUG if (!isTailCall && opts.compGcChecks && (info.compRetType == TYP_REF)) { // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with // one-return BB. assert(op2->gtType == TYP_REF); // confirm that the argument is a GC pointer (for debugging (GC stress)) GenTreeCall::Use* args = gtNewCallArgs(op2); op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args); if (verbose) { printf("\ncompGcChecks tree:\n"); gtDispTree(op2); } } #endif } else { if (verCurrentState.esStackDepth != 0) { assert(compIsForInlining()); JITDUMP("CALLSITE_COMPILATION_ERROR: inlinee's stack is not empty."); compInlineResult->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); return false; } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (before normalization) =>\n"); gtDispTree(op2); } #endif // Make sure the type matches the original call. var_types returnType = genActualType(op2->gtType); var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType; if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT)) { originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass); } if (returnType != originalCallType) { // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa. // Allow TYP_REF to be returned as TYP_I_IMPL and NOT vice verse. 
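// For example, an inlinee declared to return native int may actually produce a byref;
// that mismatch is tolerated, but producing a native int where an object ref (TYP_REF)
// is expected is rejected below.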
if ((TypeIs(returnType, TYP_BYREF, TYP_REF) && (originalCallType == TYP_I_IMPL)) || ((returnType == TYP_I_IMPL) && TypeIs(originalCallType, TYP_BYREF))) { JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); } else { JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType), varTypeName(originalCallType)); compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH); return false; } } // Below, we are going to set impInlineInfo->retExpr to the tree with the return // expression. At this point, retExpr could already be set if there are multiple // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of // the other blocks already set it. If there is only a single return block, // retExpr shouldn't be set. However, this is not true if we reimport a block // with a return. In that case, retExpr will be set, then the block will be // reimported, but retExpr won't get cleared as part of setting the block to // be reimported. The reimported retExpr value should be the same, so even if // we don't unconditionally overwrite it, it shouldn't matter. if (info.compRetNativeType != TYP_STRUCT) { // compRetNativeType is not TYP_STRUCT. // This implies it could be either a scalar type or SIMD vector type or // a struct type that can be normalized to a scalar type. if (varTypeIsStruct(info.compRetType)) { noway_assert(info.compRetBuffArg == BAD_VAR_NUM); // adjust the type away from struct to integral // and no normalizing op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); } else { // Do we have to normalize? var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType); if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) && fgCastNeeded(op2, fncRealRetType)) { // Small-typed return values are normalized by the callee op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType); } } if (fgNeedReturnSpillTemp()) { assert(info.compRetNativeType != TYP_VOID && (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals())); // If this method returns a ref type, track the actual types seen // in the returns. if (info.compRetType == TYP_REF) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull); if (impInlineInfo->retExpr == nullptr) { // This is the first return, so best known type is the type // of this return value. impInlineInfo->retExprClassHnd = returnClsHnd; impInlineInfo->retExprClassHndIsExact = isExact; } else if (impInlineInfo->retExprClassHnd != returnClsHnd) { // This return site type differs from earlier seen sites, // so reset the info and we'll fall back to using the method's // declared return type for the return spill temp. impInlineInfo->retExprClassHnd = nullptr; impInlineInfo->retExprClassHndIsExact = false; } } impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); var_types lclRetType = lvaGetDesc(lvaInlineeReturnSpillTemp)->lvType; GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType); op2 = tmpOp2; #ifdef DEBUG if (impInlineInfo->retExpr) { // Some other block(s) have seen the CEE_RET first. // Better they spilled to the same temp. 
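                    // The asserts below check that this return site spilled to the same
                    // inlinee return spill temp that the earlier return site(s) used.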
assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR); assert(impInlineInfo->retExpr->AsLclVarCommon()->GetLclNum() == op2->AsLclVarCommon()->GetLclNum()); } #endif } #ifdef DEBUG if (verbose) { printf("\n\n Inlinee Return expression (after normalization) =>\n"); gtDispTree(op2); } #endif // Report the return expression impInlineInfo->retExpr = op2; } else { // compRetNativeType is TYP_STRUCT. // This implies that struct return via RetBuf arg or multi-reg struct return GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall(); // Assign the inlinee return into a spill temp. // spill temp only exists if there are multiple return points if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM) { // in this case we have to insert multiple struct copies to the temp // and the retexpr is just the temp. assert(info.compRetNativeType != TYP_VOID); assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()); impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(), (unsigned)CHECK_SPILL_ALL); } #if defined(TARGET_ARM) || defined(UNIX_AMD64_ABI) #if defined(TARGET_ARM) // TODO-ARM64-NYI: HFA // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the // next ifdefs could be refactored in a single method with the ifdef inside. if (IsHfa(retClsHnd)) { // Same as !IsHfa but just don't bother with impAssignStructPtr. #else // defined(UNIX_AMD64_ABI) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { // If single eightbyte, the return type would have been normalized and there won't be a temp var. // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes - // max allowed.) assert(retRegCount == MAX_RET_REG_COUNT); // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr. CLANG_FORMAT_COMMENT_ANCHOR; #endif // defined(UNIX_AMD64_ABI) if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { #if defined(TARGET_ARM) impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType); #else // defined(UNIX_AMD64_ABI) // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); #endif // defined(UNIX_AMD64_ABI) } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_ARM64) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount >= 2); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #elif defined(TARGET_X86) ReturnTypeDesc retTypeDesc; retTypeDesc.InitializeStructReturnType(this, retClsHnd, info.compCallConv); unsigned retRegCount = retTypeDesc.GetReturnRegCount(); if (retRegCount != 0) { assert(!iciCall->HasRetBufArg()); assert(retRegCount == MAX_RET_REG_COUNT); if (fgNeedReturnSpillTemp()) { if (!impInlineInfo->retExpr) { // The inlinee compiler has figured out the type of the temp already. Use it here. 
impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType); } } else { impInlineInfo->retExpr = op2; } } else #endif // defined(TARGET_ARM64) { assert(iciCall->HasRetBufArg()); GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->GetNode()); // spill temp only exists if there are multiple return points if (fgNeedReturnSpillTemp()) { // if this is the first return we have seen set the retExpr if (!impInlineInfo->retExpr) { impInlineInfo->retExpr = impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType), retClsHnd, (unsigned)CHECK_SPILL_ALL); } } else { impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); } } } if (impInlineInfo->retExpr != nullptr) { impInlineInfo->retBB = compCurBB; } } } if (compIsForInlining()) { return true; } if (info.compRetType == TYP_VOID) { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } else if (info.compRetBuffArg != BAD_VAR_NUM) { // Assign value to return buff (first param) GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF DEBUGARG(impCurStmtDI.GetLocation().GetOffset())); op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL); impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX). CLANG_FORMAT_COMMENT_ANCHOR; #if defined(TARGET_AMD64) // x64 (System V and Win64) calling convention requires to // return the implicit return buffer explicitly (in RAX). // Change the return type to be BYREF. op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); #else // !defined(TARGET_AMD64) // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX). // In such case the return value of the function is changed to BYREF. // If profiler hook is not needed the return type of the function is TYP_VOID. if (compIsProfilerHookNeeded()) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #if defined(TARGET_ARM64) // On ARM64, the native instance calling convention variant // requires the implicit ByRef to be explicitly returned. else if (TargetOS::IsWindows && callConvIsInstanceMethodCallConv(info.compCallConv)) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif #if defined(TARGET_X86) else if (info.compCallConv != CorInfoCallConvExtension::Managed) { op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF)); } #endif else { // return void op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID); } #endif // !defined(TARGET_AMD64) } else if (varTypeIsStruct(info.compRetType)) { #if !FEATURE_MULTIREG_RET // For both ARM architectures the HFA native types are maintained as structs. // Also on System V AMD64 the multireg structs returns are also left as structs. 
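    // Without multi-reg struct returns, the struct return must already have been
    // normalized to a primitive native return type; the assert below checks that.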
noway_assert(info.compRetNativeType != TYP_STRUCT); #endif op2 = impFixupStructReturnType(op2, retClsHnd, info.compCallConv); // return op2 var_types returnType = info.compRetType; op1 = gtNewOperNode(GT_RETURN, genActualType(returnType), op2); } else { // return op2 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2); } // We must have imported a tailcall and jumped to RET if (isTailCall) { assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode)); opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES // impImportCall() would have already appended TYP_VOID calls if (info.compRetType == TYP_VOID) { return true; } } impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtDI); #ifdef DEBUG // Remember at which BC offset the tree was finished impNoteLastILoffs(); #endif return true; } /***************************************************************************** * Mark the block as unimported. * Note that the caller is responsible for calling impImportBlockPending(), * with the appropriate stack-state */ inline void Compiler::impReimportMarkBlock(BasicBlock* block) { #ifdef DEBUG if (verbose && (block->bbFlags & BBF_IMPORTED)) { printf("\n" FMT_BB " will be reimported\n", block->bbNum); } #endif block->bbFlags &= ~BBF_IMPORTED; } /***************************************************************************** * Mark the successors of the given block as unimported. * Note that the caller is responsible for calling impImportBlockPending() * for all the successors, with the appropriate stack-state. */ void Compiler::impReimportMarkSuccessors(BasicBlock* block) { for (BasicBlock* const succBlock : block->Succs()) { impReimportMarkBlock(succBlock); } } /***************************************************************************** * * Filter wrapper to handle only passed in exception code * from it). */ LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam) { if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION) { return EXCEPTION_EXECUTE_HANDLER; } return EXCEPTION_CONTINUE_SEARCH; } void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart) { assert(block->hasTryIndex()); assert(!compIsForInlining()); unsigned tryIndex = block->getTryIndex(); EHblkDsc* HBtab = ehGetDsc(tryIndex); if (isTryStart) { assert(block->bbFlags & BBF_TRY_BEG); // The Stack must be empty // if (block->bbStkDepth != 0) { BADCODE("Evaluation stack must be empty on entry into a try block"); } } // Save the stack contents, we'll need to restore it later // SavedStack blockState; impSaveStackState(&blockState, false); while (HBtab != nullptr) { if (isTryStart) { // Are we verifying that an instance constructor properly initializes it's 'this' pointer once? // We do not allow the 'this' pointer to be uninitialized when entering most kinds try regions // if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init)) { // We trigger an invalid program exception here unless we have a try/fault region. // if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter()) { BADCODE( "The 'this' pointer of an instance constructor is not intialized upon entry to a try region"); } else { // Allow a try/fault region to proceed. assert(HBtab->HasFaultHandler()); } } } // Recursively process the handler block, if we haven't already done so. 
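        // Handlers start with an empty evaluation stack; catch and filter handlers
        // additionally receive the exception object, which is pushed below.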
BasicBlock* hndBegBB = HBtab->ebdHndBeg; if (((hndBegBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(hndBegBB) == 0)) { // Construct the proper verification stack state // either empty or one that contains just // the Exception Object that we are dealing with // verCurrentState.esStackDepth = 0; if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp)) { CORINFO_CLASS_HANDLE clsHnd; if (HBtab->HasFilter()) { clsHnd = impGetObjectClass(); } else { CORINFO_RESOLVED_TOKEN resolvedToken; resolvedToken.tokenContext = impTokenLookupContextHandle; resolvedToken.tokenScope = info.compScopeHnd; resolvedToken.token = HBtab->ebdTyp; resolvedToken.tokenType = CORINFO_TOKENKIND_Class; info.compCompHnd->resolveToken(&resolvedToken); clsHnd = resolvedToken.hClass; } // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdHndBeg! hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false); } // Queue up the handler for importing // impImportBlockPending(hndBegBB); } // Process the filter block, if we haven't already done so. if (HBtab->HasFilter()) { /* @VERIFICATION : Ideally the end of filter state should get propagated to the catch handler, this is an incompleteness, but is not a security/compliance issue, since the only interesting state is the 'thisInit' state. */ BasicBlock* filterBB = HBtab->ebdFilter; if (((filterBB->bbFlags & BBF_IMPORTED) == 0) && (impGetPendingBlockMember(filterBB) == 0)) { verCurrentState.esStackDepth = 0; // push catch arg the stack, spill to a temp if necessary // Note: can update HBtab->ebdFilter! const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB); filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter); impImportBlockPending(filterBB); } } // This seems redundant ....?? if (verTrackObjCtorInitState && HBtab->HasFaultHandler()) { /* Recursively process the handler block */ verCurrentState.esStackDepth = 0; // Queue up the fault handler for importing // impImportBlockPending(HBtab->ebdHndBeg); } // Now process our enclosing try index (if any) // tryIndex = HBtab->ebdEnclosingTryIndex; if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { HBtab = nullptr; } else { HBtab = ehGetDsc(tryIndex); } } // Restore the stack contents impRestoreStackState(&blockState); } //*************************************************************** // Import the instructions for the given basic block. Perform // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first // time, or whose verification pre-state is changed. #ifdef _PREFAST_ #pragma warning(push) #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function #endif void Compiler::impImportBlock(BasicBlock* block) { // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to // handle them specially. In particular, there is no IL to import for them, but we do need // to mark them as imported and put their successors on the pending import list. 
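    // (Since there is no IL for such a block, importing it amounts to propagating the
    // pending-import state to its successors.)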
if (block->bbFlags & BBF_INTERNAL) { JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum); block->bbFlags |= BBF_IMPORTED; for (BasicBlock* const succBlock : block->Succs()) { impImportBlockPending(succBlock); } return; } bool markImport; assert(block); /* Make the block globaly available */ compCurBB = block; #ifdef DEBUG /* Initialize the debug variables */ impCurOpcName = "unknown"; impCurOpcOffs = block->bbCodeOffs; #endif /* Set the current stack state to the merged result */ verResetCurrentState(block, &verCurrentState); /* Now walk the code and import the IL into GenTrees */ struct FilterVerificationExceptionsParam { Compiler* pThis; BasicBlock* block; }; FilterVerificationExceptionsParam param; param.pThis = this; param.block = block; PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param) { /* @VERIFICATION : For now, the only state propagation from try to it's handler is "thisInit" state (stack is empty at start of try). In general, for state that we track in verification, we need to model the possibility that an exception might happen at any IL instruction, so we really need to merge all states that obtain between IL instructions in a try block into the start states of all handlers. However we do not allow the 'this' pointer to be uninitialized when entering most kinds try regions (only try/fault are allowed to have an uninitialized this pointer on entry to the try) Fortunately, the stack is thrown away when an exception leads to a handler, so we don't have to worry about that. We DO, however, have to worry about the "thisInit" state. But only for the try/fault case. The only allowed transition is from TIS_Uninit to TIS_Init. So for a try/fault region for the fault handler block we will merge the start state of the try begin and the post-state of each block that is part of this try region */ // merge the start state of the try begin // if (pParam->block->bbFlags & BBF_TRY_BEG) { pParam->pThis->impVerifyEHBlock(pParam->block, true); } pParam->pThis->impImportBlockCode(pParam->block); // As discussed above: // merge the post-state of each block that is part of this try region // if (pParam->block->hasTryIndex()) { pParam->pThis->impVerifyEHBlock(pParam->block, false); } } PAL_EXCEPT_FILTER(FilterVerificationExceptions) { verHandleVerificationFailure(block DEBUGARG(false)); } PAL_ENDTRY if (compDonotInline()) { return; } assert(!compDonotInline()); markImport = false; SPILLSTACK: unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks bool reimportSpillClique = false; BasicBlock* tgtBlock = nullptr; /* If the stack is non-empty, we might have to spill its contents */ if (verCurrentState.esStackDepth != 0) { impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something // on the stack, its lifetime is hard to determine, simply // don't reuse such temps. Statement* addStmt = nullptr; /* Do the successors of 'block' have any other predecessors ? We do not want to do some of the optimizations related to multiRef if we can reimport blocks */ unsigned multRef = impCanReimport ? unsigned(~0) : 0; switch (block->bbJumpKind) { case BBJ_COND: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_JTRUE); /* Note if the next block has more than one ancestor */ multRef |= block->bbNext->bbRefs; /* Does the next block have temps assigned? 
*/ baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; if (baseTmp != NO_BASE_TMP) { break; } /* Try the target of the jump then */ multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_ALWAYS: multRef |= block->bbJumpDest->bbRefs; baseTmp = block->bbJumpDest->bbStkTempsIn; tgtBlock = block->bbJumpDest; break; case BBJ_NONE: multRef |= block->bbNext->bbRefs; baseTmp = block->bbNext->bbStkTempsIn; tgtBlock = block->bbNext; break; case BBJ_SWITCH: addStmt = impExtractLastStmt(); assert(addStmt->GetRootNode()->gtOper == GT_SWITCH); for (BasicBlock* const tgtBlock : block->SwitchTargets()) { multRef |= tgtBlock->bbRefs; // Thanks to spill cliques, we should have assigned all or none assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn)); baseTmp = tgtBlock->bbStkTempsIn; if (multRef > 1) { break; } } break; case BBJ_CALLFINALLY: case BBJ_EHCATCHRET: case BBJ_RETURN: case BBJ_EHFINALLYRET: case BBJ_EHFILTERRET: case BBJ_THROW: NO_WAY("can't have 'unreached' end of BB with non-empty stack"); break; default: noway_assert(!"Unexpected bbJumpKind"); break; } assert(multRef >= 1); /* Do we have a base temp number? */ bool newTemps = (baseTmp == NO_BASE_TMP); if (newTemps) { /* Grab enough temps for the whole stack */ baseTmp = impGetSpillTmpBase(block); } /* Spill all stack entries into temps */ unsigned level, tempNum; JITDUMP("\nSpilling stack entries into temps\n"); for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++) { GenTree* tree = verCurrentState.esStack[level].val; /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from the other. This should merge to a byref in unverifiable code. However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the successor would be imported assuming there was a TYP_I_IMPL on the stack. Thus the value would not get GC-tracked. Hence, change the temp to TYP_BYREF and reimport the successors. Note: We should only allow this in unverifiable code. */ if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL) { lvaTable[tempNum].lvType = TYP_BYREF; impReimportMarkSuccessors(block); markImport = true; } #ifdef TARGET_64BIT if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "native int". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_I_IMPL; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL) { // Spill clique has decided this should be "native int", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } // Consider the case where one branch left a 'byref' on the stack and the other leaves // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64 // behavior instead of asserting and then generating bad code (where we save/restore the // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been // imported already, we need to change the type of the local and reimport the spill clique. 
// If the 'byref' side has imported, we insert a cast from int to 'native int' to match // the 'byref' size. if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT) { // Some other block in the spill clique set this to "int", but now we have "byref". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_BYREF; reimportSpillClique = true; } else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF) { // Spill clique has decided this should be "byref", but this block only pushes an "int". // Insert a sign-extension to "native int" so we match the clique size. verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL); } #endif // TARGET_64BIT if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT) { // Some other block in the spill clique set this to "float", but now we have "double". // Change the type and go back to re-import any blocks that used the wrong type. lvaTable[tempNum].lvType = TYP_DOUBLE; reimportSpillClique = true; } else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE) { // Spill clique has decided this should be "double", but this block only pushes a "float". // Insert a cast to "double" so we match the clique. verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE); } /* If addStmt has a reference to tempNum (can only happen if we are spilling to the temps already used by a previous block), we need to spill addStmt */ if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum)) { GenTree* addTree = addStmt->GetRootNode(); if (addTree->gtOper == GT_JTRUE) { GenTree* relOp = addTree->AsOp()->gtOp1; assert(relOp->OperIsCompare()); var_types type = genActualType(relOp->AsOp()->gtOp1->TypeGet()); if (gtHasRef(relOp->AsOp()->gtOp1, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1")); impAssignTempGen(temp, relOp->AsOp()->gtOp1, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp1 = gtNewLclvNode(temp, type); } if (gtHasRef(relOp->AsOp()->gtOp2, tempNum)) { unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2")); impAssignTempGen(temp, relOp->AsOp()->gtOp2, level); type = genActualType(lvaTable[temp].TypeGet()); relOp->AsOp()->gtOp2 = gtNewLclvNode(temp, type); } } else { assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet())); unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH")); impAssignTempGen(temp, addTree->AsOp()->gtOp1, level); addTree->AsOp()->gtOp1 = gtNewLclvNode(temp, genActualType(addTree->AsOp()->gtOp1->TypeGet())); } } /* Spill the stack entry, and replace with the temp */ if (!impSpillStackEntry(level, tempNum #ifdef DEBUG , true, "Spill Stack Entry" #endif )) { if (markImport) { BADCODE("bad stack state"); } // Oops. Something went wrong when spilling. Bad code. 
verHandleVerificationFailure(block DEBUGARG(true)); goto SPILLSTACK; } } /* Put back the 'jtrue'/'switch' if we removed it earlier */ if (addStmt != nullptr) { impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE); } } // Some of the append/spill logic works on compCurBB assert(compCurBB == block); /* Save the tree list in the block */ impEndTreeList(block); // impEndTreeList sets BBF_IMPORTED on the block // We do *NOT* want to set it later than this because // impReimportSpillClique might clear it if this block is both a // predecessor and successor in the current spill clique assert(block->bbFlags & BBF_IMPORTED); // If we had a int/native int, or float/double collision, we need to re-import if (reimportSpillClique) { // This will re-import all the successors of block (as well as each of their predecessors) impReimportSpillClique(block); // For blocks that haven't been imported yet, we still need to mark them as pending import. for (BasicBlock* const succ : block->Succs()) { if ((succ->bbFlags & BBF_IMPORTED) == 0) { impImportBlockPending(succ); } } } else // the normal case { // otherwise just import the successors of block /* Does this block jump to any other blocks? */ for (BasicBlock* const succ : block->Succs()) { impImportBlockPending(succ); } } } #ifdef _PREFAST_ #pragma warning(pop) #endif /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Merges the current verification state into the verification state of "block" // (its "pre-state"). void Compiler::impImportBlockPending(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum); } #endif // We will add a block to the pending set if it has not already been imported (or needs to be re-imported), // or if it has, but merging in a predecessor's post-state changes the block's pre-state. // (When we're doing verification, we always attempt the merge to detect verification errors.) // If the block has not been imported, add to pending set. bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0); // Initialize bbEntryState just the first time we try to add this block to the pending list // Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set // We use NULL to indicate the 'common' state to avoid memory allocation if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) && (impGetPendingBlockMember(block) == 0)) { verInitBBEntryState(block, &verCurrentState); assert(block->bbStkDepth == 0); block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth); assert(addToPending); assert(impGetPendingBlockMember(block) == 0); } else { // The stack should have the same height on entry to the block from all its predecessors. 
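        // A mismatch here indicates inconsistent IL; we abort via NO_WAY below.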
if (block->bbStkDepth != verCurrentState.esStackDepth) { #ifdef DEBUG char buffer[400]; sprintf_s(buffer, sizeof(buffer), "Block at offset %4.4x to %4.4x in %0.200s entered with different stack depths.\n" "Previous depth was %d, current depth is %d", block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth, verCurrentState.esStackDepth); buffer[400 - 1] = 0; NO_WAY(buffer); #else NO_WAY("Block entered with different stack depths"); #endif } if (!addToPending) { return; } if (block->bbStkDepth > 0) { // We need to fix the types of any spill temps that might have changed: // int->native int, float->double, int->byref, etc. impRetypeEntryStateTemps(block); } // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_Unknown) PendingDsc; } dsc->pdBB = block; dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth; dsc->pdThisPtrInit = verCurrentState.thisInitialized; // Save the stack trees for later if (verCurrentState.esStackDepth) { impSaveStackState(&dsc->pdSavedStack, false); } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us to now to consider the block as not imported (at least for // the final time...) block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } /*****************************************************************************/ // // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block. void Compiler::impReimportBlockPending(BasicBlock* block) { JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum); assert(block->bbFlags & BBF_IMPORTED); // OK, we must add to the pending list, if it's not already in it. if (impGetPendingBlockMember(block) != 0) { return; } // Get an entry to add to the pending list PendingDsc* dsc; if (impPendingFree) { // We can reuse one of the freed up dscs. dsc = impPendingFree; impPendingFree = dsc->pdNext; } else { // We have to create a new dsc dsc = new (this, CMK_ImpStack) PendingDsc; } dsc->pdBB = block; if (block->bbEntryState) { dsc->pdThisPtrInit = block->bbEntryState->thisInitialized; dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth; dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack; } else { dsc->pdThisPtrInit = TIS_Bottom; dsc->pdSavedStack.ssDepth = 0; dsc->pdSavedStack.ssTrees = nullptr; } // Add the entry to the pending list dsc->pdNext = impPendingList; impPendingList = dsc; impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set. // Various assertions require us to now to consider the block as not imported (at least for // the final time...) 
block->bbFlags &= ~BBF_IMPORTED; #ifdef DEBUG if (verbose && 0) { printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum); } #endif } void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp) { if (comp->impBlockListNodeFreeList == nullptr) { return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1); } else { BlockListNode* res = comp->impBlockListNodeFreeList; comp->impBlockListNodeFreeList = res->m_next; return res; } } void Compiler::FreeBlockListNode(Compiler::BlockListNode* node) { node->m_next = impBlockListNodeFreeList; impBlockListNodeFreeList = node; } void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback) { bool toDo = true; noway_assert(!fgComputePredsDone); if (!fgCheapPredsValid) { fgComputeCheapPreds(); } BlockListNode* succCliqueToDo = nullptr; BlockListNode* predCliqueToDo = new (this) BlockListNode(block); while (toDo) { toDo = false; // Look at the successors of every member of the predecessor to-do list. while (predCliqueToDo != nullptr) { BlockListNode* node = predCliqueToDo; predCliqueToDo = node->m_next; BasicBlock* blk = node->m_blk; FreeBlockListNode(node); for (BasicBlock* const succ : blk->Succs()) { // If it's not already in the clique, add it, and also add it // as a member of the successor "toDo" set. if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0) { callback->Visit(SpillCliqueSucc, succ); impSpillCliqueSetMember(SpillCliqueSucc, succ, 1); succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo); toDo = true; } } } // Look at the predecessors of every member of the successor to-do list. while (succCliqueToDo != nullptr) { BlockListNode* node = succCliqueToDo; succCliqueToDo = node->m_next; BasicBlock* blk = node->m_blk; FreeBlockListNode(node); for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next) { BasicBlock* predBlock = pred->block; // If it's not already in the clique, add it, and also add it // as a member of the predecessor "toDo" set. if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0) { callback->Visit(SpillCliquePred, predBlock); impSpillCliqueSetMember(SpillCliquePred, predBlock, 1); predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo); toDo = true; } } } } // If this fails, it means we didn't walk the spill clique properly and somehow managed // miss walking back to include the predecessor we started from. // This most likely cause: missing or out of date bbPreds assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0); } void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliqueSucc) { assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor. blk->bbStkTempsIn = m_baseTmp; } else { assert(predOrSucc == SpillCliquePred); assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor. blk->bbStkTempsOut = m_baseTmp; } } void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk) { // For Preds we could be a little smarter and just find the existing store // and re-type it/add a cast, but that is complicated and hopefully very rare, so // just re-import the whole block (just like we do for successors) if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0)) { // If we haven't imported this block and we're not going to (because it isn't on // the pending list) then just ignore it for now. 
// This block has either never been imported (EntryState == NULL) or it failed // verification. Neither state requires us to force it to be imported now. assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION)); return; } // For successors we have a valid verCurrentState, so just mark them for reimport // the 'normal' way // Unlike predecessors, we *DO* need to reimport the current block because the // initial import had the wrong entry state types. // Similarly, blocks that are currently on the pending list, still need to call // impImportBlockPending to fixup their entry state. if (predOrSucc == SpillCliqueSucc) { m_pComp->impReimportMarkBlock(blk); // Set the current stack state to that of the blk->bbEntryState m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState); assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry()); m_pComp->impImportBlockPending(blk); } else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0)) { // As described above, we are only visiting predecessors so they can // add the appropriate casts, since we have already done that for the current // block, it does not need to be reimported. // Nor do we need to reimport blocks that are still pending, but not yet // imported. // // For predecessors, we have no state to seed the EntryState, so we just have // to assume the existing one is correct. // If the block is also a successor, it will get the EntryState properly // updated when it is visited as a successor in the above "if" block. assert(predOrSucc == SpillCliquePred); m_pComp->impReimportBlockPending(blk); } } // Re-type the incoming lclVar nodes to match the varDsc. void Compiler::impRetypeEntryStateTemps(BasicBlock* blk) { if (blk->bbEntryState != nullptr) { EntryState* es = blk->bbEntryState; for (unsigned level = 0; level < es->esStackDepth; level++) { GenTree* tree = es->esStack[level].val; if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD)) { es->esStack[level].val->gtType = lvaGetDesc(tree->AsLclVarCommon())->TypeGet(); } } } } unsigned Compiler::impGetSpillTmpBase(BasicBlock* block) { if (block->bbStkTempsOut != NO_BASE_TMP) { return block->bbStkTempsOut; } #ifdef DEBUG if (verbose) { printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // Otherwise, choose one, and propagate to all members of the spill clique. // Grab enough temps for the whole stack. unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries")); SetSpillTempsBase callback(baseTmp); // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor // to one spill clique, and similarly can only be the successor to one spill clique impWalkSpillCliqueFromPred(block, &callback); return baseTmp; } void Compiler::impReimportSpillClique(BasicBlock* block) { #ifdef DEBUG if (verbose) { printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum); } #endif // DEBUG // If we get here, it is because this block is already part of a spill clique // and one predecessor had an outgoing live stack slot of type int, and this // block has an outgoing live stack slot of type native int. // We need to reset these before traversal because they have already been set // by the previous walk to determine all the members of the spill clique. 
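    // (These membership sets live on the inline root compiler, so they are shared by all
    // inlinee compilers; see impSpillCliqueGetMember/impSpillCliqueSetMember.)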
impInlineRoot()->impSpillCliquePredMembers.Reset(); impInlineRoot()->impSpillCliqueSuccMembers.Reset(); ReimportSpillClique callback(this); impWalkSpillCliqueFromPred(block, &callback); } // Set the pre-state of "block" (which should not have a pre-state allocated) to // a copy of "srcState", cloning tree pointers as required. void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState) { if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom) { block->bbEntryState = nullptr; return; } block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1); // block->bbEntryState.esRefcount = 1; block->bbEntryState->esStackDepth = srcState->esStackDepth; block->bbEntryState->thisInitialized = TIS_Bottom; if (srcState->esStackDepth > 0) { block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]); unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry); memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize); for (unsigned level = 0; level < srcState->esStackDepth; level++) { GenTree* tree = srcState->esStack[level].val; block->bbEntryState->esStack[level].val = gtCloneExpr(tree); } } if (verTrackObjCtorInitState) { verSetThisInit(block, srcState->thisInitialized); } return; } void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis) { assert(tis != TIS_Bottom); // Precondition. if (block->bbEntryState == nullptr) { block->bbEntryState = new (this, CMK_Unknown) EntryState(); } block->bbEntryState->thisInitialized = tis; } /* * Resets the current state to the state at the start of the basic block */ void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState) { if (block->bbEntryState == nullptr) { destState->esStackDepth = 0; destState->thisInitialized = TIS_Bottom; return; } destState->esStackDepth = block->bbEntryState->esStackDepth; if (destState->esStackDepth > 0) { unsigned stackSize = destState->esStackDepth * sizeof(StackEntry); memcpy(destState->esStack, block->bbStackOnEntry(), stackSize); } destState->thisInitialized = block->bbThisOnEntry(); return; } ThisInitState BasicBlock::bbThisOnEntry() const { return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom; } unsigned BasicBlock::bbStackDepthOnEntry() const { return (bbEntryState ? 
bbEntryState->esStackDepth : 0); } void BasicBlock::bbSetStack(void* stackBuffer) { assert(bbEntryState); assert(stackBuffer); bbEntryState->esStack = (StackEntry*)stackBuffer; } StackEntry* BasicBlock::bbStackOnEntry() const { assert(bbEntryState); return bbEntryState->esStack; } void Compiler::verInitCurrentState() { verTrackObjCtorInitState = false; verCurrentState.thisInitialized = TIS_Bottom; // initialize stack info verCurrentState.esStackDepth = 0; assert(verCurrentState.esStack != nullptr); // copy current state to entry state of first BB verInitBBEntryState(fgFirstBB, &verCurrentState); } Compiler* Compiler::impInlineRoot() { if (impInlineInfo == nullptr) { return this; } else { return impInlineInfo->InlineRoot; } } BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk) { if (predOrSucc == SpillCliquePred) { return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd()); } else { assert(predOrSucc == SpillCliqueSucc); return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd()); } } void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val) { if (predOrSucc == SpillCliquePred) { impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val); } else { assert(predOrSucc == SpillCliqueSucc); impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val); } } /***************************************************************************** * * Convert the instrs ("import") into our internal format (trees). The * basic flowgraph has already been constructed and is passed in. */ void Compiler::impImport() { #ifdef DEBUG if (verbose) { printf("*************** In impImport() for %s\n", info.compFullName); } #endif Compiler* inlineRoot = impInlineRoot(); if (info.compMaxStack <= SMALL_STACK_SIZE) { impStkSize = SMALL_STACK_SIZE; } else { impStkSize = info.compMaxStack; } if (this == inlineRoot) { // Allocate the stack contents verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } else { // This is the inlinee compiler, steal the stack from the inliner compiler // (after ensuring that it is large enough). if (inlineRoot->impStkSize < impStkSize) { inlineRoot->impStkSize = impStkSize; inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize]; } verCurrentState.esStack = inlineRoot->verCurrentState.esStack; } // initialize the entry state at start of method verInitCurrentState(); // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase). if (this == inlineRoot) // These are only used on the root of the inlining tree. { // We have initialized these previously, but to size 0. Make them larger. impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2); impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2); } inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2); inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2); impBlockListNodeFreeList = nullptr; #ifdef DEBUG impLastILoffsStmt = nullptr; impNestedStackSpill = false; #endif impBoxTemp = BAD_VAR_NUM; impPendingList = impPendingFree = nullptr; // Skip leading internal blocks. // These can arise from needing a leading scratch BB, from EH normalization, and from OSR entry redirects. 
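    // Such blocks carry no IL; mark them imported and walk past them to the true entry block.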
// BasicBlock* entryBlock = fgFirstBB; while (entryBlock->bbFlags & BBF_INTERNAL) { JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; if (entryBlock->bbJumpKind == BBJ_NONE) { entryBlock = entryBlock->bbNext; } else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } else { assert(!"unexpected bbJumpKind in entry sequence"); } } // Note for OSR we'd like to be able to verify this block must be // stack empty, but won't know that until we've imported...so instead // we'll BADCODE out if we mess up. // // (the concern here is that the runtime asks us to OSR a // different IL version than the one that matched the method that // triggered OSR). This should not happen but I might have the // IL versioning stuff wrong. // // TODO: we also currently expect this block to be a join point, // which we should verify over when we find jump targets. impImportBlockPending(entryBlock); /* Import blocks in the worker-list until there are no more */ while (impPendingList) { /* Remove the entry at the front of the list */ PendingDsc* dsc = impPendingList; impPendingList = impPendingList->pdNext; impSetPendingBlockMember(dsc->pdBB, 0); /* Restore the stack state */ verCurrentState.thisInitialized = dsc->pdThisPtrInit; verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth; if (verCurrentState.esStackDepth) { impRestoreStackState(&dsc->pdSavedStack); } /* Add the entry to the free list for reuse */ dsc->pdNext = impPendingFree; impPendingFree = dsc; /* Now import the block */ if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION) { verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true)); impEndTreeList(dsc->pdBB); } else { impImportBlock(dsc->pdBB); if (compDonotInline()) { return; } if (compIsForImportOnly()) { return; } } } #ifdef DEBUG if (verbose && info.compXcptnsCount) { printf("\nAfter impImport() added block for try,catch,finally"); fgDispBasicBlocks(); printf("\n"); } // Used in impImportBlockPending() for STRESS_CHK_REIMPORT for (BasicBlock* const block : Blocks()) { block->bbFlags &= ~BBF_VISITED; } #endif } // Checks if a typeinfo (usually stored in the type stack) is a struct. // The invariant here is that if it's not a ref or a method and has a class handle // it's a valuetype bool Compiler::impIsValueType(typeInfo* pTypeInfo) { if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd()) { return true; } else { return false; } } /***************************************************************************** * Check to see if the tree is the address of a local or the address of a field in a local. *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns true. */ bool Compiler::impIsAddressInLocal(const GenTree* tree, GenTree** lclVarTreeOut) { if (tree->gtOper != GT_ADDR) { return false; } GenTree* op = tree->AsOp()->gtOp1; while (op->gtOper == GT_FIELD) { op = op->AsField()->GetFldObj(); if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL. 
{ op = op->AsOp()->gtOp1; } else { return false; } } if (op->gtOper == GT_LCL_VAR) { if (lclVarTreeOut != nullptr) { *lclVarTreeOut = op; } return true; } else { return false; } } //------------------------------------------------------------------------ // impMakeDiscretionaryInlineObservations: make observations that help // determine the profitability of a discretionary inline // // Arguments: // pInlineInfo -- InlineInfo for the inline, or null for the prejit root // inlineResult -- InlineResult accumulating information about this inline // // Notes: // If inlining or prejitting the root, this method also makes // various observations about the method that factor into inline // decisions. It sets `compNativeSizeEstimate` as a side effect. void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult) { assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining. (pInlineInfo == nullptr && !compIsForInlining()) // Calculate the static inlining hint for ngen. ); // If we're really inlining, we should just have one result in play. assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult)); // If this is a "forceinline" method, the JIT probably shouldn't have gone // to the trouble of estimating the native code size. Even if it did, it // shouldn't be relying on the result of this method. assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE); // Note if the caller contains NEWOBJ or NEWARR. Compiler* rootCompiler = impInlineRoot(); if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY); } if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0) { inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ); } bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0; bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0; if (isSpecialMethod) { if (calleeIsStatic) { inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR); } else { inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR); } } else if (!calleeIsStatic) { // Callee is an instance method. // // Check if the callee has the same 'this' as the root. if (pInlineInfo != nullptr) { GenTree* thisArg = pInlineInfo->iciCall->AsCall()->gtCallThisArg->GetNode(); assert(thisArg); bool isSameThis = impIsThis(thisArg); inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis); } } bool callsiteIsGeneric = (rootCompiler->info.compMethodInfo->args.sigInst.methInstCount != 0) || (rootCompiler->info.compMethodInfo->args.sigInst.classInstCount != 0); bool calleeIsGeneric = (info.compMethodInfo->args.sigInst.methInstCount != 0) || (info.compMethodInfo->args.sigInst.classInstCount != 0); if (!callsiteIsGeneric && calleeIsGeneric) { inlineResult->Note(InlineObservation::CALLSITE_NONGENERIC_CALLS_GENERIC); } // Inspect callee's arguments (and the actual values at the callsite for them) CORINFO_SIG_INFO sig = info.compMethodInfo->args; CORINFO_ARG_LIST_HANDLE sigArg = sig.args; GenTreeCall::Use* argUse = pInlineInfo == nullptr ? nullptr : pInlineInfo->iciCall->AsCall()->gtCallArgs; for (unsigned i = 0; i < info.compMethodInfo->args.numArgs; i++) { CORINFO_CLASS_HANDLE sigClass; CorInfoType corType = strip(info.compCompHnd->getArgType(&sig, sigArg, &sigClass)); GenTree* argNode = argUse == nullptr ? 
nullptr : argUse->GetNode()->gtSkipPutArgType(); if (corType == CORINFO_TYPE_CLASS) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); } else if (corType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_ARG_STRUCT); } else if (corType == CORINFO_TYPE_BYREF) { sigClass = info.compCompHnd->getArgClass(&sig, sigArg); corType = info.compCompHnd->getChildType(sigClass, &sigClass); } if (argNode != nullptr) { bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE argCls = gtGetClassHandle(argNode, &isExact, &isNonNull); if (argCls != nullptr) { const bool isArgValueType = eeIsValueClass(argCls); // Exact class of the arg is known if (isExact && !isArgValueType) { inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS); if ((argCls != sigClass) && (sigClass != nullptr)) { // .. but the signature accepts a less concrete type. inlineResult->Note(InlineObservation::CALLSITE_ARG_EXACT_CLS_SIG_IS_NOT); } } // Arg is a reference type in the signature and a boxed value type was passed. else if (isArgValueType && (corType == CORINFO_TYPE_CLASS)) { inlineResult->Note(InlineObservation::CALLSITE_ARG_BOXED); } } if (argNode->OperIsConst()) { inlineResult->Note(InlineObservation::CALLSITE_ARG_CONST); } argUse = argUse->GetNext(); } sigArg = info.compCompHnd->getArgNext(sigArg); } // Note if the callee's return type is a value type if (info.compMethodInfo->args.retType == CORINFO_TYPE_VALUECLASS) { inlineResult->Note(InlineObservation::CALLEE_RETURNS_STRUCT); } // Note if the callee's class is a promotable struct if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0) { assert(structPromotionHelper != nullptr); if (structPromotionHelper->CanPromoteStructType(info.compClassHnd)) { inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE); } inlineResult->Note(InlineObservation::CALLEE_CLASS_VALUETYPE); } #ifdef FEATURE_SIMD // Note if this method is has SIMD args or return value if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn) { inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD); } #endif // FEATURE_SIMD // Roughly classify callsite frequency. InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED; // If this is a prejit root, or a maximally hot block... if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->isMaxBBWeight())) { frequency = InlineCallsiteFrequency::HOT; } // No training data. Look for loop-like things. // We consider a recursive call loop-like. Do not give the inlining boost to the method itself. // However, give it to things nearby. else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) && (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle)) { frequency = InlineCallsiteFrequency::LOOP; } else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT)) { frequency = InlineCallsiteFrequency::WARM; } // Now modify the multiplier based on where we're called from. else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR)) { frequency = InlineCallsiteFrequency::RARE; } else { frequency = InlineCallsiteFrequency::BORING; } // Also capture the block weight of the call site. // // In the prejit root case, assume at runtime there might be a hot call site // for this method, so we won't prematurely conclude this method should never // be inlined. 
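    // (The synthetic prejitHotCallerWeight below models that assumed hot call site.)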
// weight_t weight = 0; if (pInlineInfo != nullptr) { weight = pInlineInfo->iciBlock->bbWeight; } else { const weight_t prejitHotCallerWeight = 1000000.0; weight = prejitHotCallerWeight; } inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency)); inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)(weight)); bool hasProfile = false; double profileFreq = 0.0; // If the call site has profile data, report the relative frequency of the site. // if ((pInlineInfo != nullptr) && rootCompiler->fgHaveSufficientProfileData()) { const weight_t callSiteWeight = pInlineInfo->iciBlock->bbWeight; const weight_t entryWeight = rootCompiler->fgFirstBB->bbWeight; profileFreq = fgProfileWeightsEqual(entryWeight, 0.0) ? 0.0 : callSiteWeight / entryWeight; hasProfile = true; assert(callSiteWeight >= 0); assert(entryWeight >= 0); } else if (pInlineInfo == nullptr) { // Simulate a hot callsite for PrejitRoot mode. hasProfile = true; profileFreq = 1.0; } inlineResult->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, hasProfile); inlineResult->NoteDouble(InlineObservation::CALLSITE_PROFILE_FREQUENCY, profileFreq); } /***************************************************************************** This method makes STATIC inlining decision based on the IL code. It should not make any inlining decision based on the context. If forceInline is true, then the inlining decision should not depend on performance heuristics (code size, etc.). */ void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle, CORINFO_METHOD_INFO* methInfo, bool forceInline, InlineResult* inlineResult) { unsigned codeSize = methInfo->ILCodeSize; // We shouldn't have made up our minds yet... assert(!inlineResult->IsDecided()); if (methInfo->EHcount) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH); return; } if ((methInfo->ILCode == nullptr) || (codeSize == 0)) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // For now we don't inline varargs (import code can't handle it) if (methInfo->args.isVarArg()) { inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS); return; } // Reject if it has too many locals. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs); if (methInfo->locals.numArgs > MAX_INL_LCLS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS); return; } // Make sure there aren't too many arguments. // This is currently an implementation limit due to fixed-size arrays in the // inline info, rather than a performance heuristic. 
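    // This mirrors the MAX_INL_LCLS check on locals above, this time against MAX_INL_ARGS.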
inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs); if (methInfo->args.numArgs > MAX_INL_ARGS) { inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS); return; } // Note force inline state inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline); // Note IL code size inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize); if (inlineResult->IsFailure()) { return; } // Make sure maxstack is not too big inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack); if (inlineResult->IsFailure()) { return; } } /***************************************************************************** */ void Compiler::impCheckCanInline(GenTreeCall* call, CORINFO_METHOD_HANDLE fncHandle, unsigned methAttr, CORINFO_CONTEXT_HANDLE exactContextHnd, InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult) { // Either EE or JIT might throw exceptions below. // If that happens, just don't inline the method. struct Param { Compiler* pThis; GenTreeCall* call; CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; CORINFO_CONTEXT_HANDLE exactContextHnd; InlineResult* result; InlineCandidateInfo** ppInlineCandidateInfo; } param; memset(&param, 0, sizeof(param)); param.pThis = this; param.call = call; param.fncHandle = fncHandle; param.methAttr = methAttr; param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle); param.result = inlineResult; param.ppInlineCandidateInfo = ppInlineCandidateInfo; bool success = eeRunWithErrorTrap<Param>( [](Param* pParam) { CorInfoInitClassResult initClassResult; #ifdef DEBUG const char* methodName; const char* className; methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className); if (JitConfig.JitNoInline()) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE); goto _exit; } #endif /* Try to get the code address/size for the method */ CORINFO_METHOD_INFO methInfo; if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo)) { pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO); goto _exit; } // Profile data allows us to avoid early "too many IL bytes" outs. pParam->result->NoteBool(InlineObservation::CALLSITE_HAS_PROFILE, pParam->pThis->fgHaveSufficientProfileData()); bool forceInline; forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE); pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result); if (pParam->result->IsFailure()) { assert(pParam->result->IsNever()); goto _exit; } // Speculatively check if initClass() can be done. // If it can be done, we will try to inline the method. initClassResult = pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */, pParam->exactContextHnd /* context */); if (initClassResult & CORINFO_INITCLASS_DONT_INLINE) { pParam->result->NoteFatal(InlineObservation::CALLSITE_CANT_CLASS_INIT); goto _exit; } // Given the EE the final say in whether to inline or not. 
// This should be last since for verifiable code, this can be expensive /* VM Inline check also ensures that the method is verifiable if needed */ CorInfoInline vmResult; vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle); if (vmResult == INLINE_FAIL) { pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE); } else if (vmResult == INLINE_NEVER) { pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE); } if (pParam->result->IsFailure()) { // Make sure not to report this one. It was already reported by the VM. pParam->result->SetReported(); goto _exit; } /* Get the method properties */ CORINFO_CLASS_HANDLE clsHandle; clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle); unsigned clsAttr; clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle); /* Get the return type */ var_types fncRetType; fncRetType = pParam->call->TypeGet(); #ifdef DEBUG var_types fncRealRetType; fncRealRetType = JITtype2varType(methInfo.args.retType); assert((genActualType(fncRealRetType) == genActualType(fncRetType)) || // <BUGNUM> VSW 288602 </BUGNUM> // In case of IJW, we allow to assign a native pointer to a BYREF. (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) || (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT))); #endif // Allocate an InlineCandidateInfo structure, // // Or, reuse the existing GuardedDevirtualizationCandidateInfo, // which was pre-allocated to have extra room. // InlineCandidateInfo* pInfo; if (pParam->call->IsGuardedDevirtualizationCandidate()) { pInfo = pParam->call->gtInlineCandidateInfo; } else { pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo; // Null out bits we don't use when we're just inlining pInfo->guardedClassHandle = nullptr; pInfo->guardedMethodHandle = nullptr; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->likelihood = 0; pInfo->requiresInstMethodTableArg = false; } pInfo->methInfo = methInfo; pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd; pInfo->clsHandle = clsHandle; pInfo->exactContextHnd = pParam->exactContextHnd; pInfo->retExpr = nullptr; pInfo->preexistingSpillTemp = BAD_VAR_NUM; pInfo->clsAttr = clsAttr; pInfo->methAttr = pParam->methAttr; pInfo->initClassResult = initClassResult; pInfo->fncRetType = fncRetType; pInfo->exactContextNeedsRuntimeLookup = false; pInfo->inlinersContext = pParam->pThis->compInlineContext; // Note exactContextNeedsRuntimeLookup is reset later on, // over in impMarkInlineCandidate. *(pParam->ppInlineCandidateInfo) = pInfo; _exit:; }, &param); if (!success) { param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR); } } //------------------------------------------------------------------------ // impInlineRecordArgInfo: record information about an inline candidate argument // // Arguments: // pInlineInfo - inline info for the inline candidate // curArgVal - tree for the caller actual argument value // argNum - logical index of this argument // inlineResult - result of ongoing inline evaluation // // Notes: // // Checks for various inline blocking conditions and makes notes in // the inline info arg table about the properties of the actual. These // properties are used later by impInlineFetchArg to determine how best to // pass the argument into the inlinee. 
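//
// Illustrative example: for a call site like F(x, y + z, 0), arg 0 would be
// noted as a plain local var, arg 1 as a non-invariant expression without
// side effects, and arg 2 as an invariant constant; impInlineFetchArg later
// uses these notes to decide whether each actual can be substituted directly
// or must be spilled to a temp.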
void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, InlineResult* inlineResult) { InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum]; inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUT_ARG and RET_EXPR. curArgVal = curArgVal->gtSkipPutArgType(); curArgVal = curArgVal->gtRetExprVal(); if (curArgVal->gtOper == GT_MKREFANY) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY); return; } GenTree* lclVarTree; const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree); if (isAddressInLocal && varTypeIsStruct(lclVarTree)) { inlCurArgInfo->argIsByRefToStructLocal = true; #ifdef FEATURE_SIMD if (lvaTable[lclVarTree->AsLclVarCommon()->GetLclNum()].lvSIMDType) { pInlineInfo->hasSIMDTypeArgLocalOrReturn = true; } #endif // FEATURE_SIMD } if (curArgVal->gtFlags & GTF_ALL_EFFECT) { inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0; inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0; } if (curArgVal->gtOper == GT_LCL_VAR) { inlCurArgInfo->argIsLclVar = true; /* Remember the "original" argument number */ INDEBUG(curArgVal->AsLclVar()->gtLclILoffs = argNum;) } if (curArgVal->IsInvariant()) { inlCurArgInfo->argIsInvariant = true; if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->AsIntCon()->gtIconVal == 0)) { // Abort inlining at this call site inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS); return; } } bool isExact = false; bool isNonNull = false; inlCurArgInfo->argIsExact = (gtGetClassHandle(curArgVal, &isExact, &isNonNull) != NO_CLASS_HANDLE) && isExact; // If the arg is a local that is address-taken, we can't safely // directly substitute it into the inlinee. // // Previously we'd accomplish this by setting "argHasLdargaOp" but // that has a stronger meaning: that the arg value can change in // the method body. Using that flag prevents type propagation, // which is safe in this case. // // Instead mark the arg as having a caller local ref. if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal)) { inlCurArgInfo->argHasCallerLocalRef = true; } #ifdef DEBUG if (verbose) { if (inlCurArgInfo->argIsThis) { printf("thisArg:"); } else { printf("\nArgument #%u:", argNum); } if (inlCurArgInfo->argIsLclVar) { printf(" is a local var"); } if (inlCurArgInfo->argIsInvariant) { printf(" is a constant"); } if (inlCurArgInfo->argHasGlobRef) { printf(" has global refs"); } if (inlCurArgInfo->argHasCallerLocalRef) { printf(" has caller local ref"); } if (inlCurArgInfo->argHasSideEff) { printf(" has side effects"); } if (inlCurArgInfo->argHasLdargaOp) { printf(" has ldarga effect"); } if (inlCurArgInfo->argHasStargOp) { printf(" has starg effect"); } if (inlCurArgInfo->argIsByRefToStructLocal) { printf(" is byref to a struct local"); } printf("\n"); gtDispTree(curArgVal); printf("\n"); } #endif } //------------------------------------------------------------------------ // impInlineInitVars: setup inline information for inlinee args and locals // // Arguments: // pInlineInfo - inline info for the inline candidate // // Notes: // This method primarily adds caller-supplied info to the inlArgInfo // and sets up the lclVarInfo table. // // For args, the inlArgInfo records properties of the actual argument // including the tree node that produces the arg value. 
This node is // usually the tree node present at the call, but may also differ in // various ways: // - when the call arg is a GT_RET_EXPR, we search back through the ret // expr chain for the actual node. Note this will either be the original // call (which will be a failed inline by this point), or the return // expression from some set of inlines. // - when argument type casting is needed the necessary casts are added // around the argument node. // - if an argument can be simplified by folding then the node here is the // folded value. // // The method may make observations that lead to marking this candidate as // a failed inline. If this happens the initialization is abandoned immediately // to try and reduce the jit time cost for a failed inline. void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) { assert(!compIsForInlining()); GenTreeCall* call = pInlineInfo->iciCall; CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo; unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr; InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo; InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo; InlineResult* inlineResult = pInlineInfo->inlineResult; // Inlined methods always use the managed calling convention const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo, CorInfoCallConvExtension::Managed); /* init the argument stuct */ memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0])); GenTreeCall::Use* thisArg = call->gtCallThisArg; unsigned argCnt = 0; // Count of the arguments assert((methInfo->args.hasThis()) == (thisArg != nullptr)); if (thisArg != nullptr) { inlArgInfo[0].argIsThis = true; impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Record some information about each of the arguments */ bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0; #if USER_ARGS_COME_LAST unsigned typeCtxtArg = (thisArg != nullptr) ? 1 : 0; #else // USER_ARGS_COME_LAST unsigned typeCtxtArg = methInfo->args.totalILArgs(); #endif // USER_ARGS_COME_LAST for (GenTreeCall::Use& use : call->Args()) { if (hasRetBuffArg && (&use == call->gtCallArgs)) { continue; } // Ignore the type context argument if (hasTypeCtxtArg && (argCnt == typeCtxtArg)) { pInlineInfo->typeContextArg = typeCtxtArg; typeCtxtArg = 0xFFFFFFFF; continue; } GenTree* actualArg = gtFoldExpr(use.GetNode()); impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult); if (inlineResult->IsFailure()) { return; } /* Increment the argument count */ argCnt++; } /* Make sure we got the arg number right */ assert(argCnt == methInfo->args.totalILArgs()); #ifdef FEATURE_SIMD bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn; #endif // FEATURE_SIMD /* We have typeless opcodes, get type information from the signature */ if (thisArg != nullptr) { lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle); lclVarInfo[0].lclHasLdlocaOp = false; #ifdef FEATURE_SIMD // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase // the inlining multiplier) for anything in that assembly. // But we only need to normalize it if it is a TYP_STRUCT // (which we need to do even if we have already set foundSIMDType). 
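// (isSIMDorHWSIMDClass is, roughly, "is this a System.Numerics or
// System.Runtime.Intrinsics vector type"; seeing one anywhere in the
// inlinee's signature or locals sets CALLEE_HAS_SIMD, which the policies
// can use to boost the profitability estimate.)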
if (!foundSIMDType && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo))) { foundSIMDType = true; } #endif // FEATURE_SIMD var_types sigType = ((clsAttr & CORINFO_FLG_VALUECLASS) != 0) ? TYP_BYREF : TYP_REF; lclVarInfo[0].lclTypeInfo = sigType; GenTree* thisArgNode = thisArg->GetNode(); assert(varTypeIsGC(thisArgNode->TypeGet()) || // "this" is managed ((thisArgNode->TypeGet() == TYP_I_IMPL) && // "this" is unmgd but the method's class doesnt care (clsAttr & CORINFO_FLG_VALUECLASS))); if (genActualType(thisArgNode->TypeGet()) != genActualType(sigType)) { if (sigType == TYP_REF) { /* The argument cannot be bashed into a ref (see bug 750871) */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF); return; } /* This can only happen with byrefs <-> ints/shorts */ assert(sigType == TYP_BYREF); assert((genActualType(thisArgNode->TypeGet()) == TYP_I_IMPL) || (thisArgNode->TypeGet() == TYP_BYREF)); lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } } /* Init the types of the arguments and make sure the types * from the trees match the types in the signature */ CORINFO_ARG_LIST_HANDLE argLst; argLst = methInfo->args.args; unsigned i; for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst)) { var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args); lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst); #ifdef FEATURE_SIMD if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo))) { // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've // found a SIMD type, even if this may not be a type we recognize (the assumption is that // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier). foundSIMDType = true; if (sigType == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle()); sigType = structType; } } #endif // FEATURE_SIMD lclVarInfo[i].lclTypeInfo = sigType; lclVarInfo[i].lclHasLdlocaOp = false; /* Does the tree type match the signature type? */ GenTree* inlArgNode = inlArgInfo[i].argNode; if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE)) { assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType)); assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType)); /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints, but in bad IL cases with caller-callee signature mismatches we can see other types. Intentionally reject cases with mismatches so the jit is more flexible when encountering bad IL. */ bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) || (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) || (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType)); if (!isPlausibleTypeMatch) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE); return; } GenTree** pInlArgNode; if (inlArgNode->OperIs(GT_PUTARG_TYPE)) { // There was a widening or narrowing cast. GenTreeUnOp* putArgType = inlArgNode->AsUnOp(); pInlArgNode = &putArgType->gtOp1; inlArgNode = putArgType->gtOp1; } else { // The same size but different type of the arguments. pInlArgNode = &inlArgInfo[i].argNode; } /* Is it a narrowing or widening cast? 
* Widening casts are ok since the value computed is already * normalized to an int (on the IL stack) */ if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType)) { if (sigType == TYP_BYREF) { lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else if (inlArgNode->gtType == TYP_BYREF) { assert(varTypeIsIntOrI(sigType)); /* If possible bash the BYREF to an int */ if (inlArgNode->IsLocalAddrExpr() != nullptr) { inlArgNode->gtType = TYP_I_IMPL; lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL)); } else { /* Arguments 'int <- byref' cannot be changed */ inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT); return; } } else if (genTypeSize(sigType) < TARGET_POINTER_SIZE) { // Narrowing cast. if (inlArgNode->OperIs(GT_LCL_VAR)) { const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum(); if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum)) { // We don't need to insert a cast here as the variable // was assigned a normalized value of the right type. continue; } } inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; // Try to fold the node in case we have constant arguments. if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #ifdef TARGET_64BIT else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType)) { // This should only happen for int -> native int widening inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; /* Try to fold the node in case we have constant arguments */ if (inlArgInfo[i].argIsInvariant) { inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } *pInlArgNode = inlArgNode; } #endif // TARGET_64BIT } } } /* Init the types of the local variables */ CORINFO_ARG_LIST_HANDLE localsSig; localsSig = methInfo->locals.args; for (i = 0; i < methInfo->locals.numArgs; i++) { bool isPinned; var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned); lclVarInfo[i + argCnt].lclHasLdlocaOp = false; lclVarInfo[i + argCnt].lclTypeInfo = type; if (varTypeIsGC(type)) { if (isPinned) { JITDUMP("Inlinee local #%02u is pinned\n", i); lclVarInfo[i + argCnt].lclIsPinned = true; // Pinned locals may cause inlines to fail. inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS); if (inlineResult->IsFailure()) { return; } } pInlineInfo->numberOfGcRefLocals++; } else if (isPinned) { JITDUMP("Ignoring pin on inlinee local #%02u -- not a GC type\n", i); } lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig); // If this local is a struct type with GC fields, inform the inliner. It may choose to bail // out on the inline. if (type == TYP_STRUCT) { CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle(); DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle); if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0) { inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT); if (inlineResult->IsFailure()) { return; } // Do further notification in the case where the call site is rare; some policies do // not track the relative hotness of call sites for "always" inline cases. 
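// (GC-ref structs in inlinee locals typically add GC reporting and
// zero-initialization cost in the caller; the extra CALLSITE_RARE_GC_STRUCT
// note below lets policies back out of marginal inlines at rarely-run sites.)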
if (pInlineInfo->iciBlock->isRunRarely()) { inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT); if (inlineResult->IsFailure()) { return; } } } } localsSig = info.compCompHnd->getArgNext(localsSig); #ifdef FEATURE_SIMD if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo))) { foundSIMDType = true; if (supportSIMDTypes() && type == TYP_STRUCT) { var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle()); lclVarInfo[i + argCnt].lclTypeInfo = structType; } } #endif // FEATURE_SIMD } #ifdef FEATURE_SIMD if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd)) { foundSIMDType = true; } pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType; #endif // FEATURE_SIMD } //------------------------------------------------------------------------ // impInlineFetchLocal: get a local var that represents an inlinee local // // Arguments: // lclNum -- number of the inlinee local // reason -- debug string describing purpose of the local var // // Returns: // Number of the local to use // // Notes: // This method is invoked only for locals actually used in the // inlinee body. // // Allocates a new temp if necessary, and copies key properties // over from the inlinee local var info. unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason)) { assert(compIsForInlining()); unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum]; if (tmpNum == BAD_VAR_NUM) { const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt]; const var_types lclTyp = inlineeLocal.lclTypeInfo; // The lifetime of this local might span multiple BBs. // So it is a long lifetime local. impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason)); // Copy over key info lvaTable[tmpNum].lvType = lclTyp; lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp; lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned; lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp; lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp; // Copy over class handle for ref types. Note this may be a // shared type -- someday perhaps we can get the exact // signature and pass in a more precise type. if (lclTyp == TYP_REF) { assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp; if (lvaTable[tmpNum].lvSingleDef) { JITDUMP("Marked V%02u as a single def temp\n", tmpNum); } lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef()); } if (inlineeLocal.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); } else { // This is a wrapped primitive. Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo; } } #ifdef DEBUG // Sanity check that we're properly prepared for gc ref locals. if (varTypeIsGC(lclTyp)) { // Since there are gc locals we should have seen them earlier // and if there was a return value, set up the spill temp. assert(impInlineInfo->HasGcRefLocals()); assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp()); } else { // Make sure all pinned locals count as gc refs. 
assert(!inlineeLocal.lclIsPinned); } #endif // DEBUG } return tmpNum; } //------------------------------------------------------------------------ // impInlineFetchArg: return tree node for argument value in an inlinee // // Arguments: // lclNum -- argument number in inlinee IL // inlArgInfo -- argument info for inlinee // lclVarInfo -- var info for inlinee // // Returns: // Tree for the argument's value. Often an inlinee-scoped temp // GT_LCL_VAR but can be other tree kinds, if the argument // expression from the caller can be directly substituted into the // inlinee body. // // Notes: // Must be used only for arguments -- use impInlineFetchLocal for // inlinee locals. // // Direct substitution is performed when the formal argument cannot // change value in the inlinee body (no starg or ldarga), and the // actual argument expression's value cannot be changed if it is // substituted it into the inlinee body. // // Even if an inlinee-scoped temp is returned here, it may later be // "bashed" to a caller-supplied tree when arguments are actually // passed (see fgInlinePrependStatements). Bashing can happen if // the argument ends up being single use and other conditions are // met. So the contents of the tree returned here may not end up // being the ones ultimately used for the argument. // // This method will side effect inlArgInfo. It should only be called // for actual uses of the argument in the inlinee. GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo) { // Cache the relevant arg and lcl info for this argument. // We will modify argInfo but not lclVarInfo. InlArgInfo& argInfo = inlArgInfo[lclNum]; const InlLclVarInfo& lclInfo = lclVarInfo[lclNum]; const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp; const var_types lclTyp = lclInfo.lclTypeInfo; GenTree* op1 = nullptr; GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal(); if (argInfo.argIsInvariant && !argCanBeModified) { // Directly substitute constants or addresses of locals // // Clone the constant. Note that we cannot directly use // argNode in the trees even if !argInfo.argIsUsed as this // would introduce aliasing between inlArgInfo[].argNode and // impInlineExpr. Then gtFoldExpr() could change it, causing // further references to the argument working off of the // bashed copy. op1 = gtCloneExpr(argNode); PREFIX_ASSUME(op1 != nullptr); argInfo.argTmpNum = BAD_VAR_NUM; // We may need to retype to ensure we match the callee's view of the type. // Otherwise callee-pass throughs of arguments can create return type // mismatches that block inlining. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. if (op1->TypeGet() != lclTyp) { op1->gtType = genActualType(lclTyp); } } else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef) { // Directly substitute unaliased caller locals for args that cannot be modified // // Use the caller-supplied node if this is the first use. op1 = argNode; unsigned argLclNum = op1->AsLclVarCommon()->GetLclNum(); argInfo.argTmpNum = argLclNum; // Use an equivalent copy if this is the second or subsequent // use. // // Note argument type mismatches that prevent inlining should // have been caught in impInlineInitVars. If inlining is not prevented // but a cast is necessary, we similarly expect it to have been inserted then. // So here we may have argument type mismatches that are benign, for instance // passing a TYP_SHORT local (eg. 
normalized-on-load) as a TYP_INT arg. // The exception is when the inlining means we should start tracking the argument. if (argInfo.argIsUsed || ((lclTyp == TYP_BYREF) && (op1->TypeGet() != TYP_BYREF))) { assert(op1->gtOper == GT_LCL_VAR); assert(lclNum == op1->AsLclVar()->gtLclILoffs); // Create a new lcl var node - remember the argument lclNum op1 = impCreateLocalNode(argLclNum DEBUGARG(op1->AsLclVar()->gtLclILoffs)); // Start tracking things as a byref if the parameter is a byref. if (lclTyp == TYP_BYREF) { op1->gtType = TYP_BYREF; } } } else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp) { /* Argument is a by-ref address to a struct, a normed struct, or its field. In these cases, don't spill the byref to a local, simply clone the tree and use it. This way we will increase the chance for this byref to be optimized away by a subsequent "dereference" operation. From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal. For example, if the caller is: ldloca.s V_1 // V_1 is a local struct call void Test.ILPart::RunLdargaOnPointerArg(int32*) and the callee being inlined has: .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed ldarga.s ptrToInts call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**) then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR. */ assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL); op1 = gtCloneExpr(argNode); } else { /* Argument is a complex expression - it must be evaluated into a temp */ if (argInfo.argHasTmp) { assert(argInfo.argIsUsed); assert(argInfo.argTmpNum < lvaCount); /* Create a new lcl var node - remember the argument lclNum */ op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp)); /* This is the second or later use of the this argument, so we have to use the temp (instead of the actual arg) */ argInfo.argBashTmpNode = nullptr; } else { /* First time use */ assert(!argInfo.argIsUsed); /* Reserve a temp for the expression. * Use a large size node as we may change it later */ const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg")); lvaTable[tmpNum].lvType = lclTyp; // For ref types, determine the type of the temp. if (lclTyp == TYP_REF) { if (!argCanBeModified) { // If the arg can't be modified in the method // body, use the type of the value, if // known. Otherwise, use the declared type. assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmpNum); lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } else { // Arg might be modified, use the declared type of // the argument. lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } } assert(!lvaTable[tmpNum].IsAddressExposed()); if (argInfo.argHasLdargaOp) { lvaTable[tmpNum].lvHasLdAddrOp = 1; } if (lclInfo.lclVerTypeInfo.IsStruct()) { if (varTypeIsStruct(lclTyp)) { lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */); if (info.compIsVarArgs) { lvaSetStructUsedAsVarArg(tmpNum); } } else { // This is a wrapped primitive. 
Make sure the verstate knows that lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo; } } argInfo.argHasTmp = true; argInfo.argTmpNum = tmpNum; // If we require strict exception order, then arguments must // be evaluated in sequence before the body of the inlined method. // So we need to evaluate them to a temp. // Also, if arguments have global or local references, we need to // evaluate them to a temp before the inlined body as the // inlined body may be modifying the global ref. // TODO-1stClassStructs: We currently do not reuse an existing lclVar // if it is a struct, because it requires some additional handling. if ((!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef && !argInfo.argHasCallerLocalRef)) { /* Get a *LARGE* LCL_VAR node */ op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp) DEBUGARG(lclNum)); /* Record op1 as the very first use of this argument. If there are no further uses of the arg, we may be able to use the actual arg node instead of the temp. If we do see any further uses, we will clear this. */ argInfo.argBashTmpNode = op1; } else { /* Get a small LCL_VAR node */ op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp)); /* No bashing of this argument */ argInfo.argBashTmpNode = nullptr; } } } // Mark this argument as used. argInfo.argIsUsed = true; return op1; } /****************************************************************************** Is this the original "this" argument to the call being inlined? Note that we do not inline methods with "starg 0", and so we do not need to worry about it. */ bool Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); return (tree->gtOper == GT_LCL_VAR && tree->AsLclVarCommon()->GetLclNum() == inlArgInfo[0].argTmpNum); } //----------------------------------------------------------------------------- // impInlineIsGuaranteedThisDerefBeforeAnySideEffects: Check if a dereference in // the inlinee can guarantee that the "this" pointer is non-NULL. // // Arguments: // additionalTree - a tree to check for side effects // additionalCallArgs - a list of call args to check for side effects // dereferencedAddress - address expression being dereferenced // inlArgInfo - inlinee argument information // // Notes: // If we haven't hit a branch or a side effect, and we are dereferencing // from 'this' to access a field or make GTF_CALL_NULLCHECK call, // then we can avoid a separate null pointer check. // // The importer stack and current statement list are searched for side effects. // Trees that have been popped of the stack but haven't been appended to the // statement list and have to be checked for side effects may be provided via // additionalTree and additionalCallArgs. 
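//
// Illustrative example: when inlining a trivial instance getter whose body
// is just "return this.fld", the field load itself will fault on a null
// 'this', so no explicit null check needs to be materialized as long as
// nothing with visible side effects can run before that dereference.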
// bool Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTree, GenTreeCall::Use* additionalCallArgs, GenTree* dereferencedAddress, InlArgInfo* inlArgInfo) { assert(compIsForInlining()); assert(opts.OptEnabled(CLFLG_INLINING)); BasicBlock* block = compCurBB; if (block != fgFirstBB) { return false; } if (!impInlineIsThis(dereferencedAddress, inlArgInfo)) { return false; } if ((additionalTree != nullptr) && GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTree->gtFlags)) { return false; } for (GenTreeCall::Use& use : GenTreeCall::UseList(additionalCallArgs)) { if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(use.GetNode()->gtFlags)) { return false; } } for (Statement* stmt : StatementList(impStmtList)) { GenTree* expr = stmt->GetRootNode(); if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags)) { return false; } } for (unsigned level = 0; level < verCurrentState.esStackDepth; level++) { GenTreeFlags stackTreeFlags = verCurrentState.esStack[level].val->gtFlags; if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags)) { return false; } } return true; } //------------------------------------------------------------------------ // impMarkInlineCandidate: determine if this call can be subsequently inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // Mostly a wrapper for impMarkInlineCandidateHelper that also undoes // guarded devirtualization for virtual calls where the method we'd // devirtualize to cannot be inlined. void Compiler::impMarkInlineCandidate(GenTree* callNode, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { GenTreeCall* call = callNode->AsCall(); // Do the actual evaluation impMarkInlineCandidateHelper(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo); // If this call is an inline candidate or is not a guarded devirtualization // candidate, we're done. if (call->IsInlineCandidate() || !call->IsGuardedDevirtualizationCandidate()) { return; } // If we can't inline the call we'd guardedly devirtualize to, // we undo the guarded devirtualization, as the benefit from // just guarded devirtualization alone is likely not worth the // extra jit time and code size. // // TODO: it is possibly interesting to allow this, but requires // fixes elsewhere too... JITDUMP("Revoking guarded devirtualization candidacy for call [%06u]: target method can't be inlined\n", dspTreeID(call)); call->ClearGuardedDevirtualizationCandidate(); } //------------------------------------------------------------------------ // impMarkInlineCandidateHelper: determine if this call can be subsequently // inlined // // Arguments: // callNode -- call under scrutiny // exactContextHnd -- context handle for inlining // exactContextNeedsRuntimeLookup -- true if context required runtime lookup // callInfo -- call info from VM // // Notes: // If callNode is an inline candidate, this method sets the flag // GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have // filled in the associated InlineCandidateInfo. // // If callNode is not an inline candidate, and the reason is // something that is inherent to the method being called, the // method may be marked as "noinline" to short-circuit any // future assessments of calls to this method. 
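//
// (As a rule of thumb: CALLEE_* observations describe properties inherent to
// the target method and may cause it to be flagged "noinline" for all future
// callers, while CALLSITE_* observations only affect this particular call.)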
void Compiler::impMarkInlineCandidateHelper(GenTreeCall* call, CORINFO_CONTEXT_HANDLE exactContextHnd, bool exactContextNeedsRuntimeLookup, CORINFO_CALL_INFO* callInfo) { // Let the strategy know there's another call impInlineRoot()->m_inlineStrategy->NoteCall(); if (!opts.OptEnabled(CLFLG_INLINING)) { /* XXX Mon 8/18/2008 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and * figure out why we did not set MAXOPT for this compile. */ assert(!compIsForInlining()); return; } if (compIsForImportOnly()) { // Don't bother creating the inline candidate during verification. // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification // that leads to the creation of multiple instances of Compiler. return; } InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate"); // Don't inline if not optimizing root method if (opts.compDbgCode) { inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN); return; } // Don't inline if inlining into this method is disabled. if (impInlineRoot()->m_inlineStrategy->IsInliningDisabled()) { inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE); return; } // Don't inline into callers that use the NextCallReturnAddress intrinsic. if (info.compHasNextCallRetAddr) { inlineResult.NoteFatal(InlineObservation::CALLER_USES_NEXT_CALL_RET_ADDR); return; } // Inlining candidate determination needs to honor only IL tail prefix. // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive). if (call->IsTailPrefixedCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX); return; } // Delegate Invoke method doesn't have a body and gets special cased instead. // Don't even bother trying to inline it. if (call->IsDelegateInvoke()) { inlineResult.NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY); return; } // Tail recursion elimination takes precedence over inlining. // TODO: We may want to do some of the additional checks from fgMorphCall // here to reduce the chance we don't inline a call that won't be optimized // as a fast tail call or turned into a loop. if (gtIsRecursiveCall(call) && call->IsImplicitTailCall()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL); return; } if (call->IsVirtual()) { // Allow guarded devirt calls to be treated as inline candidates, // but reject all other virtual calls. if (!call->IsGuardedDevirtualizationCandidate()) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT); return; } } /* Ignore helper calls */ if (call->gtCallType == CT_HELPER) { assert(!call->IsGuardedDevirtualizationCandidate()); inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER); return; } /* Ignore indirect calls */ if (call->gtCallType == CT_INDIRECT) { inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED); return; } /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding * inlining in throw blocks. I should consider the same thing for catch and filter regions. 
*/ CORINFO_METHOD_HANDLE fncHandle; unsigned methAttr; if (call->IsGuardedDevirtualizationCandidate()) { if (call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle != nullptr) { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodUnboxedEntryHandle; } else { fncHandle = call->gtGuardedDevirtualizationCandidateInfo->guardedMethodHandle; } methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } else { fncHandle = call->gtCallMethHnd; // Reuse method flags from the original callInfo if possible if (fncHandle == callInfo->hMethod) { methAttr = callInfo->methodFlags; } else { methAttr = info.compCompHnd->getMethodAttribs(fncHandle); } } #ifdef DEBUG if (compStressCompile(STRESS_FORCE_INLINE, 0)) { methAttr |= CORINFO_FLG_FORCEINLINE; } #endif // Check for COMPlus_AggressiveInlining if (compDoAggressiveInlining) { methAttr |= CORINFO_FLG_FORCEINLINE; } if (!(methAttr & CORINFO_FLG_FORCEINLINE)) { /* Don't bother inline blocks that are in the filter region */ if (bbInCatchHandlerILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the catch handler region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH); return; } if (bbInFilterILRange(compCurBB)) { #ifdef DEBUG if (verbose) { printf("\nWill not inline blocks that are in the filter region\n"); } #endif inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER); return; } } /* Check if we tried to inline this method before */ if (methAttr & CORINFO_FLG_DONT_INLINE) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE); return; } /* Cannot inline synchronized methods */ if (methAttr & CORINFO_FLG_SYNCH) { inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED); return; } /* Check legality of PInvoke callsite (for inlining of marshalling code) */ if (methAttr & CORINFO_FLG_PINVOKE) { // See comment in impCheckForPInvokeCall BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB; if (!impCanPInvokeInlineCallSite(block)) { inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH); return; } } InlineCandidateInfo* inlineCandidateInfo = nullptr; impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult); if (inlineResult.IsFailure()) { return; } // The old value should be null OR this call should be a guarded devirtualization candidate. assert((call->gtInlineCandidateInfo == nullptr) || call->IsGuardedDevirtualizationCandidate()); // The new value should not be null. assert(inlineCandidateInfo != nullptr); inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup; call->gtInlineCandidateInfo = inlineCandidateInfo; // If we're in an inlinee compiler, and have a return spill temp, and this inline candidate // is also a tail call candidate, it can use the same return spill temp. // if (compIsForInlining() && call->CanTailCall() && (impInlineInfo->inlineCandidateInfo->preexistingSpillTemp != BAD_VAR_NUM)) { inlineCandidateInfo->preexistingSpillTemp = impInlineInfo->inlineCandidateInfo->preexistingSpillTemp; JITDUMP("Inline candidate [%06u] can share spill temp V%02u\n", dspTreeID(call), inlineCandidateInfo->preexistingSpillTemp); } // Mark the call node as inline candidate. call->gtFlags |= GTF_CALL_INLINE_CANDIDATE; // Let the strategy know there's another candidate. 
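// (Being a candidate is not a commitment: the actual inline is attempted
// later, in the fgInline phase, which re-evaluates the candidate info
// recorded here and can still reject the inline.)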
impInlineRoot()->m_inlineStrategy->NoteCandidate(); // Since we're not actually inlining yet, and this call site is // still just an inline candidate, there's nothing to report. inlineResult.SetReported(); } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by target-specific // instructions bool Compiler::IsTargetIntrinsic(NamedIntrinsic intrinsicName) { #if defined(TARGET_XARCH) switch (intrinsicName) { // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1 // instructions to directly compute round/ceiling/floor/truncate. case NI_System_Math_Abs: case NI_System_Math_Sqrt: return true; case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: return compOpportunisticallyDependsOn(InstructionSet_SSE41); case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_FMA); default: return false; } #elif defined(TARGET_ARM64) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Ceiling: case NI_System_Math_Floor: case NI_System_Math_Truncate: case NI_System_Math_Round: case NI_System_Math_Sqrt: case NI_System_Math_Max: case NI_System_Math_Min: return true; case NI_System_Math_FusedMultiplyAdd: return compOpportunisticallyDependsOn(InstructionSet_AdvSimd); default: return false; } #elif defined(TARGET_ARM) switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Round: case NI_System_Math_Sqrt: return true; default: return false; } #else // TODO: This portion of logic is not implemented for other arch. // The reason for returning true is that on all other arch the only intrinsic // enabled are target intrinsics. return true; #endif } /******************************************************************************/ // Returns true if the given intrinsic will be implemented by calling System.Math // methods. bool Compiler::IsIntrinsicImplementedByUserCall(NamedIntrinsic intrinsicName) { // Currently, if a math intrinsic is not implemented by target-specific // instructions, it will be implemented by a System.Math call. In the // future, if we turn to implementing some of them with helper calls, // this predicate needs to be revisited. 
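// For example, per IsTargetIntrinsic above: on x64 without SSE4.1,
// Math.Floor remains a call into System.Math, while Math.Sqrt is expanded
// directly to a hardware sqrt instruction.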
return !IsTargetIntrinsic(intrinsicName); } bool Compiler::IsMathIntrinsic(NamedIntrinsic intrinsicName) { switch (intrinsicName) { case NI_System_Math_Abs: case NI_System_Math_Acos: case NI_System_Math_Acosh: case NI_System_Math_Asin: case NI_System_Math_Asinh: case NI_System_Math_Atan: case NI_System_Math_Atanh: case NI_System_Math_Atan2: case NI_System_Math_Cbrt: case NI_System_Math_Ceiling: case NI_System_Math_Cos: case NI_System_Math_Cosh: case NI_System_Math_Exp: case NI_System_Math_Floor: case NI_System_Math_FMod: case NI_System_Math_FusedMultiplyAdd: case NI_System_Math_ILogB: case NI_System_Math_Log: case NI_System_Math_Log2: case NI_System_Math_Log10: case NI_System_Math_Max: case NI_System_Math_Min: case NI_System_Math_Pow: case NI_System_Math_Round: case NI_System_Math_Sin: case NI_System_Math_Sinh: case NI_System_Math_Sqrt: case NI_System_Math_Tan: case NI_System_Math_Tanh: case NI_System_Math_Truncate: { assert((intrinsicName > NI_SYSTEM_MATH_START) && (intrinsicName < NI_SYSTEM_MATH_END)); return true; } default: { assert((intrinsicName < NI_SYSTEM_MATH_START) || (intrinsicName > NI_SYSTEM_MATH_END)); return false; } } } bool Compiler::IsMathIntrinsic(GenTree* tree) { return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->AsIntrinsic()->gtIntrinsicName); } //------------------------------------------------------------------------ // impDevirtualizeCall: Attempt to change a virtual vtable call into a // normal call // // Arguments: // call -- the call node to examine/modify // pResolvedToken -- [IN] the resolved token used to create the call. Used for R2R. // method -- [IN/OUT] the method handle for call. Updated iff call devirtualized. // methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized. // pContextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized. // pExactContextHandle -- [OUT] updated context handle iff call devirtualized // isLateDevirtualization -- if devirtualization is happening after importation // isExplicitTailCall -- [IN] true if we plan on using an explicit tail call // ilOffset -- IL offset of the call // // Notes: // Virtual calls in IL will always "invoke" the base class method. // // This transformation looks for evidence that the type of 'this' // in the call is exactly known, is a final class or would invoke // a final method, and if that and other safety checks pan out, // modifies the call and the call info to create a direct call. // // This transformation is initially done in the importer and not // in some subsequent optimization pass because we want it to be // upstream of inline candidate identification. // // However, later phases may supply improved type information that // can enable further devirtualization. We currently reinvoke this // code after inlining, if the return value of the inlined call is // the 'this obj' of a subsequent virtual call. // // If devirtualization succeeds and the call's this object is a // (boxed) value type, the jit will ask the EE for the unboxed entry // point. If this exists, the jit will invoke the unboxed entry // on the box payload. In addition, if the boxing operation is // visible to the jit and the call is the only consumer of the box, // the jit will try to analyze the box to see if the call can // instead be made on a local copy. If that is doable, the call is // updated to invoke the unboxed entry on the local copy and the // boxing operation is removed.
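//
// Illustrative example (C#):
//
//   Base b = new Derived();
//   b.Virt();   // callvirt Base::Virt in IL
//
// Here the jit typically knows the exact type of 'b' is Derived, so the
// virtual call can be rewritten as a direct call to Derived::Virt and then
// considered as an ordinary inline candidate.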
// // When guarded devirtualization is enabled, this method will mark // calls as guarded devirtualization candidates, if the type of `this` // is not exactly known, and there is a plausible guess for the type. void Compiler::impDevirtualizeCall(GenTreeCall* call, CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_METHOD_HANDLE* method, unsigned* methodFlags, CORINFO_CONTEXT_HANDLE* pContextHandle, CORINFO_CONTEXT_HANDLE* pExactContextHandle, bool isLateDevirtualization, bool isExplicitTailCall, IL_OFFSET ilOffset) { assert(call != nullptr); assert(method != nullptr); assert(methodFlags != nullptr); assert(pContextHandle != nullptr); // This should be a virtual vtable or virtual stub call. // assert(call->IsVirtual()); // Possibly instrument. Note for OSR+PGO we will instrument when // optimizing and (currently) won't devirtualize. We may want // to revisit -- if we can devirtualize we should be able to // suppress the probe. // // We strip BBINSTR from inlinees currently, so we'll only // do this for the root method calls. // if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR)) { assert(opts.OptimizationDisabled() || opts.IsOSR()); assert(!compIsForInlining()); // During importation, optionally flag this block as one that // contains calls requiring class profiling. Ideally perhaps // we'd just keep track of the calls themselves, so we don't // have to search for them later. // if ((call->gtCallType != CT_INDIRECT) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR) && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT) && (JitConfig.JitClassProfiling() > 0) && !isLateDevirtualization) { JITDUMP("\n ... marking [%06u] in " FMT_BB " for class profile instrumentation\n", dspTreeID(call), compCurBB->bbNum); ClassProfileCandidateInfo* pInfo = new (this, CMK_Inlining) ClassProfileCandidateInfo; // Record some info needed for the class profiling probe. // pInfo->ilOffset = ilOffset; pInfo->probeIndex = info.compClassProbeCount++; call->gtClassProfileCandidateInfo = pInfo; // Flag block as needing scrutiny // compCurBB->bbFlags |= BBF_HAS_CLASS_PROFILE; } return; } // Bail if optimizations are disabled. if (opts.OptimizationDisabled()) { return; } #if defined(DEBUG) // Bail if devirt is disabled. if (JitConfig.JitEnableDevirtualization() == 0) { return; } // Optionally, print info on devirtualization Compiler* const rootCompiler = impInlineRoot(); const bool doPrint = JitConfig.JitPrintDevirtualizedMethods().contains(rootCompiler->info.compMethodName, rootCompiler->info.compClassName, &rootCompiler->info.compMethodInfo->args); #endif // DEBUG // Fetch information about the virtual method we're calling. CORINFO_METHOD_HANDLE baseMethod = *method; unsigned baseMethodAttribs = *methodFlags; if (baseMethodAttribs == 0) { // For late devirt we may not have method attributes, so fetch them. baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); } else { #if defined(DEBUG) // Validate that callInfo has up to date method flags const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod); // All the base method attributes should agree, save that // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1 // because of concurrent jitting activity. // // Note we don't look at this particular flag bit below, and // later on (if we do try and inline) we will rediscover why // the method can't be inlined, so there's no danger here in // seeing this particular flag bit in different states between // the cached and fresh values. 
if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE)) { assert(!"mismatched method attributes"); } #endif // DEBUG } // In R2R mode, we might see virtual stub calls to // non-virtuals. For instance, cases where the non-virtual method // is in a different assembly but is called via CALLVIRT. For // version resilience we must allow for the fact that the method // might become virtual in some update. // // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a // regular call+nullcheck upstream, so we won't reach this // point. if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0) { assert(call->IsVirtualStub()); assert(opts.IsReadyToRun()); JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n"); return; } // Fetch information about the class that introduced the virtual method. CORINFO_CLASS_HANDLE baseClass = info.compCompHnd->getMethodClass(baseMethod); const DWORD baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass); // Is the call an interface call? const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0; // See what we know about the type of 'this' in the call. GenTree* thisObj = call->gtCallThisArg->GetNode()->gtEffectiveVal(false); bool isExact = false; bool objIsNonNull = false; CORINFO_CLASS_HANDLE objClass = gtGetClassHandle(thisObj, &isExact, &objIsNonNull); // Bail if we know nothing. if (objClass == NO_CLASS_HANDLE) { JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet())); // Don't try guarded devirtualization when we're doing late devirtualization. // if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG("unknown")); return; } // If the objClass is sealed (final), then we may be able to devirtualize. const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass); const bool objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0; #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; const char* objClassNote = "[?]"; const char* objClassName = "?objClass"; const char* baseClassName = "?baseClass"; const char* baseMethodName = "?baseMethod"; if (verbose || doPrint) { objClassNote = isExact ? " [exact]" : objClassIsFinal ? " [final]" : ""; objClassName = info.compCompHnd->getClassName(objClass); baseClassName = info.compCompHnd->getClassName(baseClass); baseMethodName = eeGetMethodName(baseMethod, nullptr); if (verbose) { printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n" " class for 'this' is %s%s (attrib %08x)\n" " base method is %s::%s\n", callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName); } } #endif // defined(DEBUG) // See if the jit's best type for `obj` is an interface. // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal // IL_021d: ldloc.0 // IL_021e: callvirt instance int32 System.Object::GetHashCode() // // If so, we can't devirtualize, but we may be able to do guarded devirtualization. // if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0) { // Don't try guarded devirtualization when we're doing late devirtualization.
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // If we get this far, the jit has a lower bound class type for the `this` object being used for dispatch. // It may or may not know enough to devirtualize... if (isInterface) { assert(call->IsVirtualStub()); JITDUMP("--- base class is interface\n"); } // Fetch the method that would be called based on the declared type of 'this', // and prepare to fetch the method attributes. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = objClass; dvInfo.context = *pContextHandle; dvInfo.detail = CORINFO_DEVIRTUALIZATION_UNKNOWN; dvInfo.pResolvedTokenVirtualMethod = pResolvedToken; info.compCompHnd->resolveVirtualMethod(&dvInfo); CORINFO_METHOD_HANDLE derivedMethod = dvInfo.devirtualizedMethod; CORINFO_CONTEXT_HANDLE exactContext = dvInfo.exactContext; CORINFO_CLASS_HANDLE derivedClass = NO_CLASS_HANDLE; CORINFO_RESOLVED_TOKEN* pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedMethod; if (derivedMethod != nullptr) { assert(exactContext != nullptr); assert(((size_t)exactContext & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS); derivedClass = (CORINFO_CLASS_HANDLE)((size_t)exactContext & ~CORINFO_CONTEXTFLAGS_MASK); } DWORD derivedMethodAttribs = 0; bool derivedMethodIsFinal = false; bool canDevirtualize = false; #if defined(DEBUG) const char* derivedClassName = "?derivedClass"; const char* derivedMethodName = "?derivedMethod"; const char* note = "inexact or not final"; #endif // If we failed to get a method handle, we can't directly devirtualize. // // This can happen when prejitting, if the devirtualization crosses // servicing bubble boundaries, or if objClass is a shared class. // if (derivedMethod == nullptr) { JITDUMP("--- no derived method: %s\n", devirtualizationDetailToString(dvInfo.detail)); } else { // Fetch method attributes to see if method is marked final. derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod); derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0); #if defined(DEBUG) if (isExact) { note = "exact"; } else if (objClassIsFinal) { note = "final class"; } else if (derivedMethodIsFinal) { note = "final method"; } if (verbose || doPrint) { derivedMethodName = eeGetMethodName(derivedMethod, nullptr); derivedClassName = eeGetClassName(derivedClass); if (verbose) { printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note); gtDispTree(call); } } #endif // defined(DEBUG) canDevirtualize = isExact || objClassIsFinal || (!isInterface && derivedMethodIsFinal); } // We still might be able to do a guarded devirtualization. // Note the call might be an interface call or a virtual call. // if (!canDevirtualize) { JITDUMP(" Class not final or exact%s\n", isInterface ? "" : ", and method not final"); #if defined(DEBUG) // If we know the object type exactly, we generally expect we can devirtualize. // (don't when doing late devirt as we won't have an owner type (yet)) // if (!isLateDevirtualization && (isExact || objClassIsFinal) && JitConfig.JitNoteFailedExactDevirtualization()) { printf("@@@ Exact/Final devirt failure in %s at [%06u] $ %s\n", info.compFullName, dspTreeID(call), devirtualizationDetailToString(dvInfo.detail)); } #endif // Don't try guarded devirtualiztion if we're doing late devirtualization. 
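// (Guarded devirtualization, roughly: emit a runtime check of the object's
// method table against the single most likely class, call the devirtualized
// method directly on that path, and keep the original virtual/interface call
// as the fallback path.)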
// if (isLateDevirtualization) { JITDUMP("No guarded devirt during late devirtualization\n"); return; } considerGuardedDevirtualization(call, ilOffset, isInterface, baseMethod, baseClass, pContextHandle DEBUGARG(objClass) DEBUGARG(objClassName)); return; } // All checks done. Time to transform the call. // // We should always have an exact class context. // // Note that wouldnt' be true if the runtime side supported array interface devirt, // the resulting method would be a generic method of the non-generic SZArrayHelper class. // assert(canDevirtualize); JITDUMP(" %s; can devirtualize\n", note); // Make the updates. call->gtFlags &= ~GTF_CALL_VIRT_VTABLE; call->gtFlags &= ~GTF_CALL_VIRT_STUB; call->gtCallMethHnd = derivedMethod; call->gtCallType = CT_USER_FUNC; call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED; // Virtual calls include an implicit null check, which we may // now need to make explicit. if (!objIsNonNull) { call->gtFlags |= GTF_CALL_NULLCHECK; } // Clear the inline candidate info (may be non-null since // it's a union field used for other things by virtual // stubs) call->gtInlineCandidateInfo = nullptr; #if defined(DEBUG) if (verbose) { printf("... after devirt...\n"); gtDispTree(call); } if (doPrint) { printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName, baseMethodName, derivedClassName, derivedMethodName, note); } // If we successfully devirtualized based on an exact or final class, // and we have dynamic PGO data describing the likely class, make sure they agree. // // If pgo source is not dynamic we may see likely classes from other versions of this code // where types had different properties. // // If method is an inlinee we may be specializing to a class that wasn't seen at runtime. // const bool canSensiblyCheck = (isExact || objClassIsFinal) && (fgPgoSource == ICorJitInfo::PgoSource::Dynamic) && !compIsForInlining(); if (JitConfig.JitCrossCheckDevirtualizationAndPGO() && canSensiblyCheck) { // We only can handle a single likely class for now const int maxLikelyClasses = 1; LikelyClassRecord likelyClasses[maxLikelyClasses]; UINT32 numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); UINT32 likelihood = likelyClasses[0].likelihood; CORINFO_CLASS_HANDLE likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses > 0) { // PGO had better agree the class we devirtualized to is plausible. // if (likelyClass != derivedClass) { // Managed type system may report different addresses for a class handle // at different times....? // // Also, AOT may have a more nuanced notion of class equality. // if (!opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) { bool mismatch = true; // derivedClass will be the introducer of derived method, so it's possible // likelyClass is a non-overriding subclass. Check up the hierarchy. 
// CORINFO_CLASS_HANDLE parentClass = likelyClass; while (parentClass != NO_CLASS_HANDLE) { if (parentClass == derivedClass) { mismatch = false; break; } parentClass = info.compCompHnd->getParentType(parentClass); } if (mismatch || (numberOfClasses != 1) || (likelihood != 100)) { printf("@@@ Likely %p (%s) != Derived %p (%s) [n=%u, l=%u, il=%u] in %s \n", likelyClass, eeGetClassName(likelyClass), derivedClass, eeGetClassName(derivedClass), numberOfClasses, likelihood, ilOffset, info.compFullName); } assert(!(mismatch || (numberOfClasses != 1) || (likelihood != 100))); } } } } #endif // defined(DEBUG) // If the 'this' object is a value class, see if we can rework the call to invoke the // unboxed entry. This effectively inlines the normally un-inlineable wrapper stub // and exposes the potentially inlinable unboxed entry method. // // We won't optimize explicit tail calls, as ensuring we get the right tail call info // is tricky (we'd need to pass an updated sig and resolved token back to some callers). // // Note we may not have a derived class in some cases (eg interface call on an array) // if (info.compCompHnd->isValueClass(derivedClass)) { if (isExplicitTailCall) { JITDUMP("Have a direct explicit tail call to boxed entry point; can't optimize further\n"); } else { JITDUMP("Have a direct call to boxed entry point. Trying to optimize to call an unboxed entry point\n"); // Note for some shared methods the unboxed entry point requires an extra parameter. bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethod = info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg); if (unboxedEntryMethod != nullptr) { bool optimizedTheBox = false; // If the 'this' object is a local box, see if we can revise things // to not require boxing. // if (thisObj->IsBoxedValue() && !isExplicitTailCall) { // Since the call is the only consumer of the box, we know the box can't escape // since it is being passed an interior pointer. // // So, revise the box to simply create a local copy, use the address of that copy // as the this pointer, and update the entry point to the unboxed entry. // // Ideally, we then inline the boxed method and and if it turns out not to modify // the copy, we can undo the copy too. if (requiresInstMethodTableArg) { // Perform a trial box removal and ask for the type handle tree that fed the box. // JITDUMP("Unboxed entry needs method table arg...\n"); GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE); if (methodTableArg != nullptr) { // If that worked, turn the box into a copy to a local var // JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg)); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { // Pass the local var as this and the type handle as a new arg // JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table " "arg\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. 
// if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } call->gtCallMethHnd = unboxedEntryMethod; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethodAttribs = unboxedMethodAttribs; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n"); } } else { JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n"); } } else { JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n"); GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY); if (localCopyThis != nullptr) { JITDUMP("Success! invoking unboxed entry point on local copy\n"); call->gtCallThisArg = gtNewCallArgs(localCopyThis); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; optimizedTheBox = true; } else { JITDUMP("Sorry, failed to undo the box\n"); } } if (optimizedTheBox) { #if FEATURE_TAILCALL_OPT if (call->IsImplicitTailCall()) { JITDUMP("Clearing the implicit tail call flag\n"); // If set, we clear the implicit tail call flag // as we just introduced a new address taken local variable // call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL; } #endif // FEATURE_TAILCALL_OPT } } if (!optimizedTheBox) { // If we get here, we have a boxed value class that either wasn't boxed // locally, or was boxed locally but we were unable to remove the box for // various reasons. // // We can still update the call to invoke the unboxed entry, if the // boxed value is simple. // if (requiresInstMethodTableArg) { // Get the method table from the boxed object. // GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const clonedThisArg = gtClone(thisArg); if (clonedThisArg == nullptr) { JITDUMP( "unboxed entry needs MT arg, but `this` was too complex to clone. Deferring update.\n"); } else { JITDUMP("revising call to invoke unboxed entry with additional method table arg\n"); GenTree* const methodTableArg = gtNewMethodTableLookup(clonedThisArg); // Update the 'this' pointer to refer to the box payload // GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; // Method attributes will differ because unboxed entry point is shared // const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod); JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs, unboxedMethodAttribs); derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; derivedMethodAttribs = unboxedMethodAttribs; // Add the method table argument. 
// // Prepend for R2L arg passing or empty L2R passing // Append for non-empty L2R // if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr)) { // If there's a ret buf, the method table is the second arg. // if (call->HasRetBufArg()) { gtInsertNewCallArgAfter(methodTableArg, call->gtCallArgs); } else { call->gtCallArgs = gtPrependNewCallArg(methodTableArg, call->gtCallArgs); } } else { GenTreeCall::Use* beforeArg = call->gtCallArgs; while (beforeArg->GetNext() != nullptr) { beforeArg = beforeArg->GetNext(); } beforeArg->SetNext(gtNewCallArgs(methodTableArg)); } } } else { JITDUMP("revising call to invoke unboxed entry\n"); GenTree* const thisArg = call->gtCallThisArg->GetNode(); GenTree* const payloadOffset = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL); GenTree* const boxPayload = gtNewOperNode(GT_ADD, TYP_BYREF, thisArg, payloadOffset); call->gtCallThisArg = gtNewCallArgs(boxPayload); call->gtCallMethHnd = unboxedEntryMethod; call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED; derivedMethod = unboxedEntryMethod; pDerivedResolvedToken = &dvInfo.resolvedTokenDevirtualizedUnboxedMethod; } } } else { // Many of the low-level methods on value classes won't have unboxed entries, // as they need access to the type of the object. // // Note this may be a cue for us to stack allocate the boxed object, since // we probably know that these objects don't escape. JITDUMP("Sorry, failed to find unboxed entry point\n"); } } } // Need to update call info too. // *method = derivedMethod; *methodFlags = derivedMethodAttribs; // Update context handle // *pContextHandle = MAKE_METHODCONTEXT(derivedMethod); // Update exact context handle. // if (pExactContextHandle != nullptr) { *pExactContextHandle = MAKE_CLASSCONTEXT(derivedClass); } #ifdef FEATURE_READYTORUN if (opts.IsReadyToRun()) { // For R2R, getCallInfo triggers bookkeeping on the zap // side and acquires the actual symbol to call so we need to call it here. // Look up the new call info. CORINFO_CALL_INFO derivedCallInfo; eeGetCallInfo(pDerivedResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, &derivedCallInfo); // Update the call. call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT; call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT; call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup); } #endif // FEATURE_READYTORUN } //------------------------------------------------------------------------ // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call // to an intrinsic returns an exact type // // Arguments: // methodHnd -- handle for the special intrinsic method // // Returns: // Exact class handle returned by the intrinsic call, if known. // Nullptr if not known, or not likely to lead to beneficial optimization. CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd) { JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd)); CORINFO_CLASS_HANDLE result = nullptr; // See what intrinisc we have... const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd); switch (ni) { case NI_System_Collections_Generic_Comparer_get_Default: case NI_System_Collections_Generic_EqualityComparer_get_Default: { // Expect one class generic parameter; figure out which it is. 
CORINFO_SIG_INFO sig; info.compCompHnd->getMethodSig(methodHnd, &sig); assert(sig.sigInst.classInstCount == 1); CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0]; assert(typeHnd != nullptr); // Lookup can incorrect when we have __Canon as it won't appear // to implement any interface types. // // And if we do not have a final type, devirt & inlining is // unlikely to result in much simplification. // // We can use CORINFO_FLG_FINAL to screen out both of these cases. const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd); const bool isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0); if (isFinalType) { if (ni == NI_System_Collections_Generic_EqualityComparer_get_Default) { result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd); } else { assert(ni == NI_System_Collections_Generic_Comparer_get_Default); result = info.compCompHnd->getDefaultComparerClass(typeHnd); } JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd), result != nullptr ? eeGetClassName(result) : "unknown"); } else { JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd)); } break; } default: { JITDUMP("This special intrinsic not handled, sorry...\n"); break; } } return result; } //------------------------------------------------------------------------ // impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it. // // Arguments: // token - init value for the allocated token. // // Return Value: // pointer to token into jit-allocated memory. CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(const CORINFO_RESOLVED_TOKEN& token) { CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1); *memory = token; return memory; } //------------------------------------------------------------------------ // SpillRetExprHelper: iterate through arguments tree and spill ret_expr to local variables. // class SpillRetExprHelper { public: SpillRetExprHelper(Compiler* comp) : comp(comp) { } void StoreRetExprResultsInArgs(GenTreeCall* call) { for (GenTreeCall::Use& use : call->Args()) { comp->fgWalkTreePre(&use.NodeRef(), SpillRetExprVisitor, this); } if (call->gtCallThisArg != nullptr) { comp->fgWalkTreePre(&call->gtCallThisArg->NodeRef(), SpillRetExprVisitor, this); } } private: static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre) { assert((pTree != nullptr) && (*pTree != nullptr)); GenTree* tree = *pTree; if ((tree->gtFlags & GTF_CALL) == 0) { // Trees with ret_expr are marked as GTF_CALL. 
return Compiler::WALK_SKIP_SUBTREES; } if (tree->OperGet() == GT_RET_EXPR) { SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData); walker->StoreRetExprAsLocalVar(pTree); } return Compiler::WALK_CONTINUE; } void StoreRetExprAsLocalVar(GenTree** pRetExpr) { GenTree* retExpr = *pRetExpr; assert(retExpr->OperGet() == GT_RET_EXPR); const unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr")); JITDUMP("Storing return expression [%06u] to a local var V%02u.\n", comp->dspTreeID(retExpr), tmp); comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE); *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet()); if (retExpr->TypeGet() == TYP_REF) { assert(comp->lvaTable[tmp].lvSingleDef == 0); comp->lvaTable[tmp].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmp); bool isExact = false; bool isNonNull = false; CORINFO_CLASS_HANDLE retClsHnd = comp->gtGetClassHandle(retExpr, &isExact, &isNonNull); if (retClsHnd != nullptr) { comp->lvaSetClass(tmp, retClsHnd, isExact); } } } private: Compiler* comp; }; //------------------------------------------------------------------------ // addFatPointerCandidate: mark the call and the method, that they have a fat pointer candidate. // Spill ret_expr in the call node, because they can't be cloned. // // Arguments: // call - fat calli candidate // void Compiler::addFatPointerCandidate(GenTreeCall* call) { JITDUMP("Marking call [%06u] as fat pointer candidate\n", dspTreeID(call)); setMethodHasFatPointer(); call->SetFatPointerCandidate(); SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); } //------------------------------------------------------------------------ // considerGuardedDevirtualization: see if we can profitably guess at the // class involved in an interface or virtual call. // // Arguments: // // call - potential guarded devirtualization candidate // ilOffset - IL ofset of the call instruction // isInterface - true if this is an interface call // baseMethod - target method of the call // baseClass - class that introduced the target method // pContextHandle - context handle for the call // objClass - class of 'this' in the call // objClassName - name of the obj Class // // Notes: // Consults with VM to see if there's a likely class at runtime, // if so, adds a candidate for guarded devirtualization. // void Compiler::considerGuardedDevirtualization( GenTreeCall* call, IL_OFFSET ilOffset, bool isInterface, CORINFO_METHOD_HANDLE baseMethod, CORINFO_CLASS_HANDLE baseClass, CORINFO_CONTEXT_HANDLE* pContextHandle DEBUGARG(CORINFO_CLASS_HANDLE objClass) DEBUGARG(const char* objClassName)) { #if defined(DEBUG) const char* callKind = isInterface ? "interface" : "virtual"; #endif JITDUMP("Considering guarded devirtualization at IL offset %u (0x%x)\n", ilOffset, ilOffset); // We currently only get likely class guesses when there is PGO data // with class profiles. // if (fgPgoClassProfiles == 0) { JITDUMP("Not guessing for class: no class profile pgo data, or pgo disabled\n"); return; } // See if there's a likely guess for the class. // const unsigned likelihoodThreshold = isInterface ? 25 : 30; unsigned likelihood = 0; unsigned numberOfClasses = 0; CORINFO_CLASS_HANDLE likelyClass = NO_CLASS_HANDLE; bool doRandomDevirt = false; const int maxLikelyClasses = 32; LikelyClassRecord likelyClasses[maxLikelyClasses]; #ifdef DEBUG // Optional stress mode to pick a random known class, rather than // the most likely known class. 
// doRandomDevirt = JitConfig.JitRandomGuardedDevirtualization() != 0; if (doRandomDevirt) { // Reuse the random inliner's random state. // CLRRandom* const random = impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomGuardedDevirtualization()); likelyClasses[0].clsHandle = getRandomClass(fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset, random); likelyClasses[0].likelihood = 100; if (likelyClasses[0].clsHandle != NO_CLASS_HANDLE) { numberOfClasses = 1; } } else #endif { numberOfClasses = getLikelyClasses(likelyClasses, maxLikelyClasses, fgPgoSchema, fgPgoSchemaCount, fgPgoData, ilOffset); } // For now we only use the most popular type likelihood = likelyClasses[0].likelihood; likelyClass = likelyClasses[0].clsHandle; if (numberOfClasses < 1) { JITDUMP("No likely class, sorry\n"); return; } assert(likelyClass != NO_CLASS_HANDLE); // Print all likely classes JITDUMP("%s classes for %p (%s):\n", doRandomDevirt ? "Random" : "Likely", dspPtr(objClass), objClassName) for (UINT32 i = 0; i < numberOfClasses; i++) { JITDUMP(" %u) %p (%s) [likelihood:%u%%]\n", i + 1, likelyClasses[i].clsHandle, eeGetClassName(likelyClasses[i].clsHandle), likelyClasses[i].likelihood); } // Todo: a more advanced heuristic using likelihood, number of // classes, and the profile count for this block. // // For now we will guess if the likelihood is at least 25%/30% (intfc/virt), as studies // have shown this transformation should pay off even if we guess wrong sometimes. // if (likelihood < likelihoodThreshold) { JITDUMP("Not guessing for class; likelihood is below %s call threshold %u\n", callKind, likelihoodThreshold); return; } uint32_t const likelyClassAttribs = info.compCompHnd->getClassAttribs(likelyClass); if ((likelyClassAttribs & CORINFO_FLG_ABSTRACT) != 0) { // We may see an abstract likely class, if we have a stale profile. // No point guessing for this. // JITDUMP("Not guessing for class; abstract (stale profile)\n"); return; } // Figure out which method will be called. // CORINFO_DEVIRTUALIZATION_INFO dvInfo; dvInfo.virtualMethod = baseMethod; dvInfo.objClass = likelyClass; dvInfo.context = *pContextHandle; dvInfo.exactContext = *pContextHandle; dvInfo.pResolvedTokenVirtualMethod = nullptr; const bool canResolve = info.compCompHnd->resolveVirtualMethod(&dvInfo); if (!canResolve) { JITDUMP("Can't figure out which method would be invoked, sorry\n"); return; } CORINFO_METHOD_HANDLE likelyMethod = dvInfo.devirtualizedMethod; JITDUMP("%s call would invoke method %s\n", callKind, eeGetMethodName(likelyMethod, nullptr)); // Add this as a potential candidate. // uint32_t const likelyMethodAttribs = info.compCompHnd->getMethodAttribs(likelyMethod); addGuardedDevirtualizationCandidate(call, likelyMethod, likelyClass, likelyMethodAttribs, likelyClassAttribs, likelihood); } //------------------------------------------------------------------------ // addGuardedDevirtualizationCandidate: potentially mark the call as a guarded // devirtualization candidate // // Notes: // // Call sites in rare or unoptimized code, and calls that require cookies are // not marked as candidates. // // As part of marking the candidate, the code spills GT_RET_EXPRs anywhere in any // child tree, because and we need to clone all these trees when we clone the call // as part of guarded devirtualization, and these IR nodes can't be cloned. 
// // Arguments: // call - potential guarded devirtualization candidate // methodHandle - method that will be invoked if the class test succeeds // classHandle - class that will be tested for at runtime // methodAttr - attributes of the method // classAttr - attributes of the class // likelihood - odds that this class is the class seen at runtime // void Compiler::addGuardedDevirtualizationCandidate(GenTreeCall* call, CORINFO_METHOD_HANDLE methodHandle, CORINFO_CLASS_HANDLE classHandle, unsigned methodAttr, unsigned classAttr, unsigned likelihood) { // This transformation only makes sense for virtual calls assert(call->IsVirtual()); // Only mark calls if the feature is enabled. const bool isEnabled = JitConfig.JitEnableGuardedDevirtualization() > 0; if (!isEnabled) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- disabled by jit config\n", dspTreeID(call)); return; } // Bail if not optimizing or the call site is very likely cold if (compCurBB->isRunRarely() || opts.OptimizationDisabled()) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- rare / dbg / minopts\n", dspTreeID(call)); return; } // CT_INDIRECT calls may use the cookie, bail if so... // // If transforming these provides a benefit, we could save this off in the same way // we save the stub address below. if ((call->gtCallType == CT_INDIRECT) && (call->AsCall()->gtCallCookie != nullptr)) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- CT_INDIRECT with cookie\n", dspTreeID(call)); return; } #ifdef DEBUG // See if disabled by range // static ConfigMethodRange JitGuardedDevirtualizationRange; JitGuardedDevirtualizationRange.EnsureInit(JitConfig.JitGuardedDevirtualizationRange()); assert(!JitGuardedDevirtualizationRange.Error()); if (!JitGuardedDevirtualizationRange.Contains(impInlineRoot()->info.compMethodHash())) { JITDUMP("NOT Marking call [%06u] as guarded devirtualization candidate -- excluded by " "JitGuardedDevirtualizationRange", dspTreeID(call)); return; } #endif // We're all set, proceed with candidate creation. // JITDUMP("Marking call [%06u] as guarded devirtualization candidate; will guess for class %s\n", dspTreeID(call), eeGetClassName(classHandle)); setMethodHasGuardedDevirtualization(); call->SetGuardedDevirtualizationCandidate(); // Spill off any GT_RET_EXPR subtrees so we can clone the call. // SpillRetExprHelper helper(this); helper.StoreRetExprResultsInArgs(call); // Gather some information for later. Note we actually allocate InlineCandidateInfo // here, as the devirtualized half of this call will likely become an inline candidate. // GuardedDevirtualizationCandidateInfo* pInfo = new (this, CMK_Inlining) InlineCandidateInfo; pInfo->guardedMethodHandle = methodHandle; pInfo->guardedMethodUnboxedEntryHandle = nullptr; pInfo->guardedClassHandle = classHandle; pInfo->likelihood = likelihood; pInfo->requiresInstMethodTableArg = false; // If the guarded class is a value class, look for an unboxed entry point. // if ((classAttr & CORINFO_FLG_VALUECLASS) != 0) { JITDUMP(" ... class is a value class, looking for unboxed entry\n"); bool requiresInstMethodTableArg = false; CORINFO_METHOD_HANDLE unboxedEntryMethodHandle = info.compCompHnd->getUnboxedEntry(methodHandle, &requiresInstMethodTableArg); if (unboxedEntryMethodHandle != nullptr) { JITDUMP(" ... 
updating GDV candidate with unboxed entry info\n"); pInfo->guardedMethodUnboxedEntryHandle = unboxedEntryMethodHandle; pInfo->requiresInstMethodTableArg = requiresInstMethodTableArg; } } call->gtGuardedDevirtualizationCandidateInfo = pInfo; } void Compiler::addExpRuntimeLookupCandidate(GenTreeCall* call) { setMethodHasExpRuntimeLookup(); call->SetExpRuntimeLookup(); } //------------------------------------------------------------------------ // impIsClassExact: check if a class handle can only describe values // of exactly one class. // // Arguments: // classHnd - handle for class in question // // Returns: // true if class is final and not subject to special casting from // variance or similar. // // Note: // We are conservative on arrays of primitive types here. bool Compiler::impIsClassExact(CORINFO_CLASS_HANDLE classHnd) { DWORD flags = info.compCompHnd->getClassAttribs(classHnd); DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY; if ((flags & flagsMask) == CORINFO_FLG_FINAL) { return true; } if ((flags & flagsMask) == (CORINFO_FLG_FINAL | CORINFO_FLG_ARRAY)) { CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType type = info.compCompHnd->getChildType(classHnd, &arrayElementHandle); if ((type == CORINFO_TYPE_CLASS) || (type == CORINFO_TYPE_VALUECLASS)) { return impIsClassExact(arrayElementHandle); } } return false; } //------------------------------------------------------------------------ // impCanSkipCovariantStoreCheck: see if storing a ref type value to an array // can skip the array store covariance check. // // Arguments: // value -- tree producing the value to store // array -- tree representing the array to store to // // Returns: // true if the store does not require a covariance check. // bool Compiler::impCanSkipCovariantStoreCheck(GenTree* value, GenTree* array) { // We should only call this when optimizing. assert(opts.OptimizationEnabled()); // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j] if (value->OperIs(GT_INDEX) && array->OperIs(GT_LCL_VAR)) { GenTree* valueIndex = value->AsIndex()->Arr(); if (valueIndex->OperIs(GT_LCL_VAR)) { unsigned valueLcl = valueIndex->AsLclVar()->GetLclNum(); unsigned arrayLcl = array->AsLclVar()->GetLclNum(); if ((valueLcl == arrayLcl) && !lvaGetDesc(arrayLcl)->IsAddressExposed()) { JITDUMP("\nstelem of ref from same array: skipping covariant store check\n"); return true; } } } // Check for assignment of NULL. if (value->OperIs(GT_CNS_INT)) { assert(value->gtType == TYP_REF); if (value->AsIntCon()->gtIconVal == 0) { JITDUMP("\nstelem of null: skipping covariant store check\n"); return true; } // Non-0 const refs can only occur with frozen objects assert(value->IsIconHandle(GTF_ICON_STR_HDL)); assert(doesMethodHaveFrozenString() || (compIsForInlining() && impInlineInfo->InlinerCompiler->doesMethodHaveFrozenString())); } // Try and get a class handle for the array if (value->gtType != TYP_REF) { return false; } bool arrayIsExact = false; bool arrayIsNonNull = false; CORINFO_CLASS_HANDLE arrayHandle = gtGetClassHandle(array, &arrayIsExact, &arrayIsNonNull); if (arrayHandle == NO_CLASS_HANDLE) { return false; } // There are some methods in corelib where we're storing to an array but the IL // doesn't reflect this (see SZArrayHelper). Avoid. 
DWORD attribs = info.compCompHnd->getClassAttribs(arrayHandle); if ((attribs & CORINFO_FLG_ARRAY) == 0) { return false; } CORINFO_CLASS_HANDLE arrayElementHandle = nullptr; CorInfoType arrayElemType = info.compCompHnd->getChildType(arrayHandle, &arrayElementHandle); // Verify array type handle is really an array of ref type assert(arrayElemType == CORINFO_TYPE_CLASS); // Check for exactly object[] if (arrayIsExact && (arrayElementHandle == impGetObjectClass())) { JITDUMP("\nstelem to (exact) object[]: skipping covariant store check\n"); return true; } const bool arrayTypeIsSealed = impIsClassExact(arrayElementHandle); if ((!arrayIsExact && !arrayTypeIsSealed) || (arrayElementHandle == NO_CLASS_HANDLE)) { // Bail out if we don't know array's exact type return false; } bool valueIsExact = false; bool valueIsNonNull = false; CORINFO_CLASS_HANDLE valueHandle = gtGetClassHandle(value, &valueIsExact, &valueIsNonNull); // Array's type is sealed and equals to value's type if (arrayTypeIsSealed && (valueHandle == arrayElementHandle)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } // Array's type is not sealed but we know its exact type if (arrayIsExact && (valueHandle != NO_CLASS_HANDLE) && (info.compCompHnd->compareTypesForCast(valueHandle, arrayElementHandle) == TypeCompareState::Must)) { JITDUMP("\nstelem to T[] with T exact: skipping covariant store check\n"); return true; } return false; }
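The impCanSkipCovariantStoreCheck logic above boils down to a handful of yes/no questions about the array and the value being stored. Below is a minimal standalone sketch of that decision with invented stand-in types; the real code queries the EE for class handles and attributes, which this sketch does not model, and the two "matching type" branches (sealed element type equal to the value's type, or exact array type with a castable value type) are collapsed into a single flag here.

#include <cstdio>

// Hedged model of the skip-the-covariant-store-check decision above.
// All fields are invented stand-ins for what the JIT learns from class handles.
struct StoreInfo
{
    bool storingNull;             // stelem of a null constant
    bool sameArraySource;         // arrLcl[i] = arrLcl[j] on a non-address-exposed local
    bool exactObjectArray;        // array statically known to be exactly object[]
    bool elementTypeMatchesValue; // sealed/exact element type known to accept the value
};

bool CanSkipCovariantStoreCheck(const StoreInfo& info)
{
    if (info.storingNull)
        return true; // null is storable into any ref array
    if (info.sameArraySource)
        return true; // value came out of the same array
    if (info.exactObjectArray)
        return true; // every reference is storable into object[]
    if (info.elementTypeMatchesValue)
        return true; // value's type provably fits the element type
    return false;    // otherwise keep the runtime covariance check
}

int main()
{
    StoreInfo nullStore{true, false, false, false};
    StoreInfo unknown{false, false, false, false};
    std::printf("null store: skip=%d, unknown store: skip=%d\n",
                CanSkipCovariantStoreCheck(nullStore), CanSkipCovariantStoreCheck(unknown));
    return 0;
}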
1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
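A minimal sketch of the placement policy this description outlines (sources when feasible, targets as the fallback, with the adaptive strategy choosing between them by backedge count). The enum, function name, and backedge threshold are invented for illustration; the actual strategy selection lives in the JIT's patchpoint placement code, not here.

#include <cstdio>

// Illustrative only: not the JIT's implementation.
enum class PatchpointStrategy { BackedgeTargets, BackedgeSources, Adaptive };

// Decide whether one backedge's patchpoint goes at the source block.
// 'sourceIsPlaceable' models the "may not be able to place patchpoints
// at sources, for various reasons" fallback from the description.
bool PlacePatchpointAtSource(PatchpointStrategy strategy, int numBackedges, bool sourceIsPlaceable)
{
    switch (strategy)
    {
        case PatchpointStrategy::BackedgeTargets:
            return false;
        case PatchpointStrategy::BackedgeSources:
            return sourceIsPlaceable;
        case PatchpointStrategy::Adaptive:
            // Prefer sources when the loop has only a few backedges, which
            // matches the flow produced by a simple C# `for` loop. The
            // threshold of 2 is made up for this sketch.
            return (numBackedges <= 2) && sourceIsPlaceable;
    }
    return false;
}

int main()
{
    std::printf("adaptive, 1 backedge  -> source? %d\n",
                PlacePatchpointAtSource(PatchpointStrategy::Adaptive, 1, true));
    std::printf("adaptive, 5 backedges -> source? %d\n",
                PlacePatchpointAtSource(PatchpointStrategy::Adaptive, 5, true));
    return 0;
}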
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
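For context on what a placed patchpoint does at Tier0, here is a hedged sketch of the counter-based trigger that TC_OnStackReplacement_InitialCounter (declared in jitconfigvalues.h below) parameterizes. Names are invented; the real jitted code decrements a counter supplied by the runtime and calls a patchpoint helper, which may transition the frame to an OSR method, rather than printing.

#include <cstdio>

// Hedged sketch of a Tier0 loop patchpoint probe (not the real codegen).
static int g_patchpointCounter = 1000; // default of TC_OnStackReplacement_InitialCounter

void PatchpointProbe()
{
    if (--g_patchpointCounter <= 0)
    {
        std::printf("counter exhausted: a real patchpoint would request OSR here\n");
        g_patchpointCounter = 1000; // reset so this sketch only triggers occasionally
    }
}

int main()
{
    for (int i = 0; i < 2500; i++)
    {
        PatchpointProbe(); // imagine this emitted at the loop's patchpoint
    }
    return 0;
}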
./src/coreclr/jit/jitconfigvalues.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #if !defined(CONFIG_INTEGER) || !defined(CONFIG_STRING) || !defined(CONFIG_METHODSET) #error CONFIG_INTEGER, CONFIG_STRING, and CONFIG_METHODSET must be defined before including this file. #endif // !defined(CONFIG_INTEGER) || !defined(CONFIG_STRING) || !defined(CONFIG_METHODSET) #ifdef DEBUG #define OPT_CONFIG // Enable optimization level configuration. #endif #if defined(DEBUG) /// /// JIT /// CONFIG_INTEGER(AltJitLimit, W("AltJitLimit"), 0) // Max number of functions to use altjit for (decimal) CONFIG_INTEGER(AltJitSkipOnAssert, W("AltJitSkipOnAssert"), 0) // If AltJit hits an assert, fall back to the fallback // JIT. Useful in conjunction with // COMPlus_ContinueOnAssert=1 CONFIG_INTEGER(BreakOnDumpToken, W("BreakOnDumpToken"), 0xffffffff) // Breaks when using internal logging on a // particular token value. CONFIG_INTEGER(DebugBreakOnVerificationFailure, W("DebugBreakOnVerificationFailure"), 0) // Halts the jit on // verification failure CONFIG_INTEGER(DiffableDasm, W("JitDiffableDasm"), 0) // Make the disassembly diff-able CONFIG_INTEGER(JitDasmWithAddress, W("JitDasmWithAddress"), 0) // Print the process address next to each instruction of // the disassembly CONFIG_INTEGER(DisplayLoopHoistStats, W("JitLoopHoistStats"), 0) // Display JIT loop hoisting statistics CONFIG_INTEGER(DisplayLsraStats, W("JitLsraStats"), 0) // Display JIT Linear Scan Register Allocator statistics // If set to "1", display the stats in textual format. // If set to "2", display the stats in csv format. // If set to "3", display the stats in summarize format. // Recommended to use with JitStdOutFile flag. CONFIG_STRING(JitLsraOrdering, W("JitLsraOrdering")) // LSRA heuristics ordering CONFIG_INTEGER(DumpJittedMethods, W("DumpJittedMethods"), 0) // Prints all jitted methods to the console CONFIG_INTEGER(EnablePCRelAddr, W("JitEnablePCRelAddr"), 1) // Whether absolute addr be encoded as PC-rel offset by // RyuJIT where possible CONFIG_INTEGER(JitAssertOnMaxRAPasses, W("JitAssertOnMaxRAPasses"), 0) CONFIG_INTEGER(JitBreakEmitOutputInstr, W("JitBreakEmitOutputInstr"), -1) CONFIG_INTEGER(JitBreakMorphTree, W("JitBreakMorphTree"), 0xffffffff) CONFIG_INTEGER(JitBreakOnBadCode, W("JitBreakOnBadCode"), 0) CONFIG_INTEGER(JitBreakOnMinOpts, W("JITBreakOnMinOpts"), 0) // Halt if jit switches to MinOpts CONFIG_INTEGER(JitBreakOnUnsafeCode, W("JitBreakOnUnsafeCode"), 0) CONFIG_INTEGER(JitCloneLoops, W("JitCloneLoops"), 1) // If 0, don't clone. Otherwise clone loops for optimizations. CONFIG_INTEGER(JitDebugLogLoopCloning, W("JitDebugLogLoopCloning"), 0) // In debug builds log places where loop cloning // optimizations are performed on the fast path. CONFIG_INTEGER(JitDefaultFill, W("JitDefaultFill"), 0xdd) // In debug builds, initialize the memory allocated by the nra // with this byte. CONFIG_INTEGER(JitAlignLoopMinBlockWeight, W("JitAlignLoopMinBlockWeight"), DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT) // Minimum weight needed for the first block of a loop to make it a // candidate for alignment. CONFIG_INTEGER(JitAlignLoopMaxCodeSize, W("JitAlignLoopMaxCodeSize"), DEFAULT_MAX_LOOPSIZE_FOR_ALIGN) // For non-adaptive alignment, minimum loop size (in bytes) for which // alignment will be done. // Defaults to 3 blocks of 32 bytes chunks = 96 bytes. 
CONFIG_INTEGER(JitAlignLoopBoundary, W("JitAlignLoopBoundary"), DEFAULT_ALIGN_LOOP_BOUNDARY) // For non-adaptive alignment, address boundary (power of 2) at which loop // alignment should be done. By default, 32B. CONFIG_INTEGER(JitAlignLoopForJcc, W("JitAlignLoopForJcc"), 0) // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary. CONFIG_INTEGER(JitAlignLoopAdaptive, W("JitAlignLoopAdaptive"), 1) // If set, perform adaptive loop alignment that limits number of padding based on loop size. CONFIG_INTEGER(JitHideAlignBehindJmp, W("JitHideAlignBehindJmp"), 1) // If set, try to hide align instruction (if any) behind an unconditional jump instruction (if any) // that is present before the loop start. // Print the alignment boundaries in disassembly. CONFIG_INTEGER(JitDasmWithAlignmentBoundaries, W("JitDasmWithAlignmentBoundaries"), 0) CONFIG_INTEGER(JitDirectAlloc, W("JitDirectAlloc"), 0) CONFIG_INTEGER(JitDoubleAlign, W("JitDoubleAlign"), 1) CONFIG_INTEGER(JitDumpASCII, W("JitDumpASCII"), 1) // Uses only ASCII characters in tree dumps CONFIG_INTEGER(JitDumpTerseLsra, W("JitDumpTerseLsra"), 1) // Produce terse dump output for LSRA CONFIG_INTEGER(JitDumpToDebugger, W("JitDumpToDebugger"), 0) // Output JitDump output to the debugger CONFIG_INTEGER(JitDumpVerboseSsa, W("JitDumpVerboseSsa"), 0) // Produce especially verbose dump output for SSA CONFIG_INTEGER(JitDumpVerboseTrees, W("JitDumpVerboseTrees"), 0) // Enable more verbose tree dumps CONFIG_INTEGER(JitEmitPrintRefRegs, W("JitEmitPrintRefRegs"), 0) CONFIG_INTEGER(JitEnableDevirtualization, W("JitEnableDevirtualization"), 1) // Enable devirtualization in importer CONFIG_INTEGER(JitEnableLateDevirtualization, W("JitEnableLateDevirtualization"), 1) // Enable devirtualization after // inlining CONFIG_INTEGER(JitExpensiveDebugCheckLevel, W("JitExpensiveDebugCheckLevel"), 0) // Level indicates how much checking // beyond the default to do in debug // builds (currently 1-2) CONFIG_INTEGER(JitForceFallback, W("JitForceFallback"), 0) // Set to non-zero to test NOWAY assert by forcing a retry CONFIG_INTEGER(JitFullyInt, W("JitFullyInt"), 0) // Forces Fully interruptible code CONFIG_INTEGER(JitFunctionTrace, W("JitFunctionTrace"), 0) // If non-zero, print JIT start/end logging CONFIG_INTEGER(JitGCChecks, W("JitGCChecks"), 0) CONFIG_INTEGER(JitGCInfoLogging, W("JitGCInfoLogging"), 0) // If true, prints GCInfo-related output to standard output. 
CONFIG_INTEGER(JitHashBreak, W("JitHashBreak"), -1) // Same as JitBreak, but for a method hash CONFIG_INTEGER(JitHashDump, W("JitHashDump"), -1) // Same as JitDump, but for a method hash CONFIG_INTEGER(JitHashHalt, W("JitHashHalt"), -1) // Same as JitHalt, but for a method hash CONFIG_INTEGER(JitInlineAdditionalMultiplier, W("JitInlineAdditionalMultiplier"), 0) CONFIG_INTEGER(JitInlinePrintStats, W("JitInlinePrintStats"), 0) CONFIG_INTEGER(JitInlineSize, W("JITInlineSize"), DEFAULT_MAX_INLINE_SIZE) CONFIG_INTEGER(JitInlineDepth, W("JITInlineDepth"), DEFAULT_MAX_INLINE_DEPTH) CONFIG_INTEGER(JitLongAddress, W("JitLongAddress"), 0) // Force using the large pseudo instruction form for long address CONFIG_INTEGER(JitMaxUncheckedOffset, W("JitMaxUncheckedOffset"), 8) CONFIG_INTEGER(JitMinOpts, W("JITMinOpts"), 0) // Forces MinOpts CONFIG_INTEGER(JitMinOptsBbCount, W("JITMinOptsBbCount"), DEFAULT_MIN_OPTS_BB_COUNT) // Internal jit control of MinOpts CONFIG_INTEGER(JitMinOptsCodeSize, W("JITMinOptsCodeSize"), DEFAULT_MIN_OPTS_CODE_SIZE) // Internal jit control of // MinOpts CONFIG_INTEGER(JitMinOptsInstrCount, W("JITMinOptsInstrCount"), DEFAULT_MIN_OPTS_INSTR_COUNT) // Internal jit control of // MinOpts CONFIG_INTEGER(JitMinOptsLvNumCount, W("JITMinOptsLvNumcount"), DEFAULT_MIN_OPTS_LV_NUM_COUNT) // Internal jit control // of MinOpts CONFIG_INTEGER(JitMinOptsLvRefCount, W("JITMinOptsLvRefcount"), DEFAULT_MIN_OPTS_LV_REF_COUNT) // Internal jit control // of MinOpts CONFIG_INTEGER(JitNoCSE, W("JitNoCSE"), 0) CONFIG_INTEGER(JitNoCSE2, W("JitNoCSE2"), 0) CONFIG_INTEGER(JitNoForceFallback, W("JitNoForceFallback"), 0) // Set to non-zero to prevent NOWAY assert testing. // Overrides COMPlus_JitForceFallback and JIT stress // flags. CONFIG_INTEGER(JitNoForwardSub, W("JitNoForwardSub"), 0) // Disables forward sub CONFIG_INTEGER(JitNoHoist, W("JitNoHoist"), 0) CONFIG_INTEGER(JitNoInline, W("JitNoInline"), 0) // Disables inlining of all methods CONFIG_INTEGER(JitNoMemoryBarriers, W("JitNoMemoryBarriers"), 0) // If 1, don't generate memory barriers CONFIG_INTEGER(JitNoRegLoc, W("JitNoRegLoc"), 0) CONFIG_INTEGER(JitNoStructPromotion, W("JitNoStructPromotion"), 0) // Disables struct promotion 1 - for all, 2 - for // params. CONFIG_INTEGER(JitNoUnroll, W("JitNoUnroll"), 0) CONFIG_INTEGER(JitOrder, W("JitOrder"), 0) CONFIG_INTEGER(JitQueryCurrentStaticFieldClass, W("JitQueryCurrentStaticFieldClass"), 1) CONFIG_INTEGER(JitReportFastTailCallDecisions, W("JitReportFastTailCallDecisions"), 0) CONFIG_INTEGER(JitPInvokeCheckEnabled, W("JITPInvokeCheckEnabled"), 0) CONFIG_INTEGER(JitPInvokeEnabled, W("JITPInvokeEnabled"), 1) // Controls verbosity for JitPrintInlinedMethods. Ignored for JitDump/NgenDump where // it's always set. 
CONFIG_INTEGER(JitPrintInlinedMethodsVerbose, W("JitPrintInlinedMethodsVerboseLevel"), 0) // Prints a tree of inlinees for a specific method (use '*' for all methods) CONFIG_METHODSET(JitPrintInlinedMethods, W("JitPrintInlinedMethods")) CONFIG_METHODSET(JitPrintDevirtualizedMethods, W("JitPrintDevirtualizedMethods")) CONFIG_INTEGER(JitProfileChecks, W("JitProfileChecks"), 0) // 1 enable in dumps, 2 assert if issues found CONFIG_INTEGER(JitRequired, W("JITRequired"), -1) CONFIG_INTEGER(JitRoundFloat, W("JITRoundFloat"), DEFAULT_ROUND_LEVEL) CONFIG_INTEGER(JitStackAllocToLocalSize, W("JitStackAllocToLocalSize"), DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE) CONFIG_INTEGER(JitSkipArrayBoundCheck, W("JitSkipArrayBoundCheck"), 0) CONFIG_INTEGER(JitSlowDebugChecksEnabled, W("JitSlowDebugChecksEnabled"), 1) // Turn on slow debug checks CONFIG_INTEGER(JitSplitFunctionSize, W("JitSplitFunctionSize"), 0) // On ARM, use this as the maximum function/funclet // size for creating function fragments (and creating // multiple RUNTIME_FUNCTION entries) CONFIG_INTEGER(JitSsaStress, W("JitSsaStress"), 0) // Perturb order of processing of blocks in SSA; 0 = no stress; 1 = // use method hash; * = supplied value as random hash CONFIG_INTEGER(JitStackChecks, W("JitStackChecks"), 0) CONFIG_STRING(JitStdOutFile, W("JitStdOutFile")) // If set, sends JIT's stdout output to this file. CONFIG_INTEGER(JitStress, W("JitStress"), 0) // Internal Jit stress mode: 0 = no stress, 2 = all stress, other = vary // stress based on a hash of the method and this value CONFIG_INTEGER(JitStressBBProf, W("JitStressBBProf"), 0) // Internal Jit stress mode CONFIG_INTEGER(JitStressBiasedCSE, W("JitStressBiasedCSE"), 0x101) // Internal Jit stress mode: decimal bias value // between (0,100) to perform CSE on a candidate. // 100% = All CSEs. 0% = 0 CSE. (> 100) means no // stress. CONFIG_INTEGER(JitStressModeNamesOnly, W("JitStressModeNamesOnly"), 0) // Internal Jit stress: if nonzero, only enable // stress modes listed in JitStressModeNames CONFIG_INTEGER(JitStressRegs, W("JitStressRegs"), 0) CONFIG_INTEGER(JitVNMapSelLimit, W("JitVNMapSelLimit"), 0) // If non-zero, assert if # of VNF_MapSelect applications // considered reaches this CONFIG_INTEGER(NgenHashDump, W("NgenHashDump"), -1) // same as JitHashDump, but for ngen CONFIG_INTEGER(NgenOrder, W("NgenOrder"), 0) CONFIG_INTEGER(RunAltJitCode, W("RunAltJitCode"), 1) // If non-zero, and the compilation succeeds for an AltJit, then // use the code. If zero, then we always throw away the generated // code and fall back to the default compiler. CONFIG_INTEGER(RunComponentUnitTests, W("JitComponentUnitTests"), 0) // Run JIT component unit tests CONFIG_INTEGER(ShouldInjectFault, W("InjectFault"), 0) CONFIG_INTEGER(StressCOMCall, W("StressCOMCall"), 0) CONFIG_INTEGER(TailcallStress, W("TailcallStress"), 0) CONFIG_INTEGER(TreesBeforeAfterMorph, W("JitDumpBeforeAfterMorph"), 0) // If 1, display each tree before/after morphing CONFIG_METHODSET(JitBreak, W("JitBreak")) // Stops in the importer when compiling a specified method CONFIG_METHODSET(JitDebugBreak, W("JitDebugBreak")) CONFIG_METHODSET(JitDisasm, W("JitDisasm")) // Dumps disassembly for specified method CONFIG_STRING(JitDisasmAssemblies, W("JitDisasmAssemblies")) // Only show JitDisasm and related info for methods // from this semicolon-delimited list of assemblies. CONFIG_INTEGER(JitDisasmWithGC, W("JitDisasmWithGC"), 0) // Dump interleaved GC Info for any method disassembled. 
CONFIG_METHODSET(JitDump, W("JitDump")) // Dumps trees for specified method CONFIG_INTEGER(JitDumpTier0, W("JitDumpTier0"), 1) // Dump tier0 requests CONFIG_INTEGER(JitDumpAtOSROffset, W("JitDumpAtOSROffset"), -1) // Only dump OSR requests for this offset CONFIG_INTEGER(JitDumpInlinePhases, W("JitDumpInlinePhases"), 1) // Dump inline compiler phases CONFIG_METHODSET(JitEHDump, W("JitEHDump")) // Dump the EH table for the method, as reported to the VM CONFIG_METHODSET(JitExclude, W("JitExclude")) CONFIG_METHODSET(JitForceProcedureSplitting, W("JitForceProcedureSplitting")) CONFIG_METHODSET(JitGCDump, W("JitGCDump")) CONFIG_METHODSET(JitDebugDump, W("JitDebugDump")) CONFIG_METHODSET(JitHalt, W("JitHalt")) // Emits break instruction into jitted code CONFIG_METHODSET(JitImportBreak, W("JitImportBreak")) CONFIG_METHODSET(JitInclude, W("JitInclude")) CONFIG_METHODSET(JitLateDisasm, W("JitLateDisasm")) CONFIG_METHODSET(JitMinOptsName, W("JITMinOptsName")) // Forces MinOpts for a named function CONFIG_METHODSET(JitNoProcedureSplitting, W("JitNoProcedureSplitting")) // Disallow procedure splitting for specified // methods CONFIG_METHODSET(JitNoProcedureSplittingEH, W("JitNoProcedureSplittingEH")) // Disallow procedure splitting for // specified methods if they contain // exception handling CONFIG_METHODSET(JitStressOnly, W("JitStressOnly")) // Internal Jit stress mode: stress only the specified method(s) CONFIG_METHODSET(JitUnwindDump, W("JitUnwindDump")) // Dump the unwind codes for the method /// /// NGEN /// CONFIG_METHODSET(NgenDisasm, W("NgenDisasm")) // Same as JitDisasm, but for ngen CONFIG_METHODSET(NgenDump, W("NgenDump")) // Same as JitDump, but for ngen CONFIG_METHODSET(NgenEHDump, W("NgenEHDump")) // Dump the EH table for the method, as reported to the VM CONFIG_METHODSET(NgenGCDump, W("NgenGCDump")) CONFIG_METHODSET(NgenDebugDump, W("NgenDebugDump")) CONFIG_METHODSET(NgenUnwindDump, W("NgenUnwindDump")) // Dump the unwind codes for the method /// /// JIT /// CONFIG_METHODSET(JitDumpFg, W("JitDumpFg")) // Dumps Xml/Dot Flowgraph for specified method CONFIG_STRING(JitDumpFgDir, W("JitDumpFgDir")) // Directory for Xml/Dot flowgraph dump(s) CONFIG_STRING(JitDumpFgFile, W("JitDumpFgFile")) // Filename for Xml/Dot flowgraph dump(s) (default: "default") CONFIG_STRING(JitDumpFgPhase, W("JitDumpFgPhase")) // Phase-based Xml/Dot flowgraph support. Set to the short name of a // phase to see the flowgraph after that phase. Leave unset to dump // after COLD-BLK (determine first cold block) or set to * for all // phases CONFIG_STRING(JitDumpFgPrePhase, W("JitDumpFgPrePhase")) // Same as JitDumpFgPhase, but specifies to dump pre-phase, not post-phase. 
CONFIG_INTEGER(JitDumpFgDot, W("JitDumpFgDot"), 1) // 0 == dump XML format; non-zero == dump DOT format CONFIG_INTEGER(JitDumpFgEH, W("JitDumpFgEH"), 0) // 0 == no EH regions; non-zero == include EH regions CONFIG_INTEGER(JitDumpFgLoops, W("JitDumpFgLoops"), 0) // 0 == no loop regions; non-zero == include loop regions CONFIG_INTEGER(JitDumpFgConstrained, W("JitDumpFgConstrained"), 1) // 0 == don't constrain to mostly linear layout; // non-zero == force mostly lexical block // linear layout CONFIG_INTEGER(JitDumpFgBlockID, W("JitDumpFgBlockID"), 0) // 0 == display block with bbNum; 1 == display with both // bbNum and bbID CONFIG_INTEGER(JitDumpFgBlockFlags, W("JitDumpFgBlockFlags"), 0) // 0 == don't display block flags; 1 == display flags CONFIG_INTEGER(JitDumpFgLoopFlags, W("JitDumpFgLoopFlags"), 0) // 0 == don't display loop flags; 1 == display flags CONFIG_STRING(JitDumpPreciseDebugInfoFile, W("JitDumpPreciseDebugInfoFile")) CONFIG_INTEGER(JitDisasmWithDebugInfo, W("JitDisasmWithDebugInfo"), 0) CONFIG_STRING(JitLateDisasmTo, W("JITLateDisasmTo")) CONFIG_STRING(JitRange, W("JitRange")) CONFIG_STRING(JitStressModeNames, W("JitStressModeNames")) // Internal Jit stress mode: stress using the given set of // stress mode names, e.g. STRESS_REGS, STRESS_TAILCALL CONFIG_STRING(JitStressModeNamesNot, W("JitStressModeNamesNot")) // Internal Jit stress mode: do NOT stress using the // given set of stress mode names, e.g. STRESS_REGS, // STRESS_TAILCALL CONFIG_STRING(JitStressRange, W("JitStressRange")) // Internal Jit stress mode /// /// NGEN /// CONFIG_METHODSET(NgenDumpFg, W("NgenDumpFg")) // Ngen Xml/Dot flowgraph dump support CONFIG_STRING(NgenDumpFgDir, W("NgenDumpFgDir")) // Ngen Xml/Dot flowgraph dump support CONFIG_STRING(NgenDumpFgFile, W("NgenDumpFgFile")) // Ngen Xml/Dot flowgraph dump support /// /// JIT Hardware Intrinsics /// CONFIG_INTEGER(EnableIncompleteISAClass, W("EnableIncompleteISAClass"), 0) // Enable testing not-yet-implemented // intrinsic classes #endif // defined(DEBUG) #if FEATURE_LOOP_ALIGN CONFIG_INTEGER(JitAlignLoops, W("JitAlignLoops"), 1) // If set, align inner loops #else CONFIG_INTEGER(JitAlignLoops, W("JitAlignLoops"), 0) #endif /// /// JIT /// #ifdef FEATURE_ENABLE_NO_RANGE_CHECKS CONFIG_INTEGER(JitNoRangeChks, W("JitNoRngChks"), 0) // If 1, don't generate range checks #endif // AltJitAssertOnNYI should be 0 on targets where JIT is under development or bring up stage, so as to facilitate // fallback to main JIT on hitting a NYI. #if defined(TARGET_ARM64) || defined(TARGET_X86) CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 0) // Controls the AltJit behavior of NYI stuff #else // !defined(TARGET_ARM64) && !defined(TARGET_X86) CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 1) // Controls the AltJit behavior of NYI stuff #endif // defined(TARGET_ARM64) || defined(TARGET_X86) CONFIG_INTEGER(EnableEHWriteThru, W("EnableEHWriteThru"), 1) // Enable the register allocator to support EH-write thru: // partial enregistration of vars exposed on EH boundaries CONFIG_INTEGER(EnableMultiRegLocals, W("EnableMultiRegLocals"), 1) // Enable the enregistration of locals that are // defined or used in a multireg context. 
// clang-format on #ifdef FEATURE_SIMD CONFIG_INTEGER(JitDisableSimdVN, W("JitDisableSimdVN"), 0) // Default 0, ValueNumbering of SIMD nodes and HW Intrinsic // nodes enabled // If 1, then disable ValueNumbering of SIMD nodes // If 2, then disable ValueNumbering of HW Intrinsic nodes // If 3, disable both SIMD and HW Intrinsic nodes #endif // FEATURE_SIMD // Default 0, enable the CSE of Constants, including nearby offsets. (only for ARM64) // If 1, disable all the CSE of Constants // If 2, enable the CSE of Constants but don't combine with nearby offsets. (only for ARM64) // If 3, enable the CSE of Constants including nearby offsets. (all platforms) // If 4, enable the CSE of Constants but don't combine with nearby offsets. (all platforms) // CONFIG_INTEGER(JitConstCSE, W("JitConstCSE"), 0) #define CONST_CSE_ENABLE_ARM64 0 #define CONST_CSE_DISABLE_ALL 1 #define CONST_CSE_ENABLE_ARM64_NO_SHARING 2 #define CONST_CSE_ENABLE_ALL 3 #define CONST_CSE_ENABLE_ALL_NO_SHARING 4 /// /// JIT /// #if !defined(DEBUG) && !defined(_DEBUG) CONFIG_INTEGER(JitEnableNoWayAssert, W("JitEnableNoWayAssert"), 0) #else // defined(DEBUG) || defined(_DEBUG) CONFIG_INTEGER(JitEnableNoWayAssert, W("JitEnableNoWayAssert"), 1) #endif // !defined(DEBUG) && !defined(_DEBUG) #if defined(TARGET_AMD64) || defined(TARGET_X86) #define JitMinOptsTrackGCrefs_Default 0 // Not tracking GC refs in MinOpts is new behavior #else #define JitMinOptsTrackGCrefs_Default 1 #endif CONFIG_INTEGER(JitMinOptsTrackGCrefs, W("JitMinOptsTrackGCrefs"), JitMinOptsTrackGCrefs_Default) // Track GC roots // The following should be wrapped inside "#if MEASURE_MEM_ALLOC / #endif", but // some files include this one without bringing in the definitions from "jit.h" // so we don't always know what the "true" value of that flag should be. For now // we take the easy way out and always include the flag, even in release builds // (normally MEASURE_MEM_ALLOC is off for release builds but if it's toggled on // for release in "jit.h" the flag would be missing for some includers). // TODO-Cleanup: need to make 'MEASURE_MEM_ALLOC' well-defined here at all times. CONFIG_INTEGER(DisplayMemStats, W("JitMemStats"), 0) // Display JIT memory usage statistics #if defined(DEBUG) CONFIG_INTEGER(JitEnregStats, W("JitEnregStats"), 0) // Display JIT enregistration statistics #endif // DEBUG CONFIG_INTEGER(JitAggressiveInlining, W("JitAggressiveInlining"), 0) // Aggressive inlining of all methods CONFIG_INTEGER(JitELTHookEnabled, W("JitELTHookEnabled"), 0) // If 1, emit Enter/Leave/TailCall callbacks CONFIG_INTEGER(JitInlineSIMDMultiplier, W("JitInlineSIMDMultiplier"), 3) // Ex lclMAX_TRACKED constant. 
CONFIG_INTEGER(JitMaxLocalsToTrack, W("JitMaxLocalsToTrack"), 0x400) #if defined(FEATURE_ENABLE_NO_RANGE_CHECKS) CONFIG_INTEGER(JitNoRngChks, W("JitNoRngChks"), 0) // If 1, don't generate range checks #endif // defined(FEATURE_ENABLE_NO_RANGE_CHECKS) #if defined(OPT_CONFIG) CONFIG_INTEGER(JitDoAssertionProp, W("JitDoAssertionProp"), 1) // Perform assertion propagation optimization CONFIG_INTEGER(JitDoCopyProp, W("JitDoCopyProp"), 1) // Perform copy propagation on variables that appear redundant CONFIG_INTEGER(JitDoEarlyProp, W("JitDoEarlyProp"), 1) // Perform Early Value Propagation CONFIG_INTEGER(JitDoLoopHoisting, W("JitDoLoopHoisting"), 1) // Perform loop hoisting on loop invariant values CONFIG_INTEGER(JitDoLoopInversion, W("JitDoLoopInversion"), 1) // Perform loop inversion on "for/while" loops CONFIG_INTEGER(JitDoRangeAnalysis, W("JitDoRangeAnalysis"), 1) // Perform range check analysis CONFIG_INTEGER(JitDoRedundantBranchOpts, W("JitDoRedundantBranchOpts"), 1) // Perform redundant branch optimizations CONFIG_INTEGER(JitDoSsa, W("JitDoSsa"), 1) // Perform Static Single Assignment (SSA) numbering on the variables CONFIG_INTEGER(JitDoValueNumber, W("JitDoValueNumber"), 1) // Perform value numbering on method expressions CONFIG_METHODSET(JitOptRepeat, W("JitOptRepeat")) // Runs optimizer multiple times on the method CONFIG_INTEGER(JitOptRepeatCount, W("JitOptRepeatCount"), 2) // Number of times to repeat opts when repeating #endif // defined(OPT_CONFIG) CONFIG_INTEGER(JitTelemetry, W("JitTelemetry"), 1) // If non-zero, gather JIT telemetry data // Max # of MapSelect's considered for a particular top-level invocation. CONFIG_INTEGER(JitVNMapSelBudget, W("JitVNMapSelBudget"), DEFAULT_MAP_SELECT_BUDGET) CONFIG_INTEGER(TailCallLoopOpt, W("TailCallLoopOpt"), 1) // Convert recursive tail calls to loops CONFIG_METHODSET(AltJit, W("AltJit")) // Enables AltJit and selectively limits it to the specified methods. CONFIG_METHODSET(AltJitNgen, W("AltJitNgen")) // Enables AltJit for NGEN and selectively limits it // to the specified methods. CONFIG_STRING(AltJitExcludeAssemblies, W("AltJitExcludeAssemblies")) // Do not use AltJit on this // semicolon-delimited list of assemblies. CONFIG_INTEGER(JitMeasureIR, W("JitMeasureIR"), 0) // If set, measure the IR size after some phases and report it in // the time log. CONFIG_STRING(JitFuncInfoFile, W("JitFuncInfoLogFile")) // If set, gather JIT function info and write to this file. CONFIG_STRING(JitTimeLogCsv, W("JitTimeLogCsv")) // If set, gather JIT throughput data and write to a CSV file. This // mode must be used in internal retail builds. CONFIG_STRING(TailCallOpt, W("TailCallOpt")) CONFIG_INTEGER(FastTailCalls, W("FastTailCalls"), 1) // If set, allow fast tail calls; otherwise allow only helper-based // calls // for explicit tail calls. CONFIG_INTEGER(JitMeasureNowayAssert, W("JitMeasureNowayAssert"), 0) // Set to 1 to measure noway_assert usage. Only // valid if MEASURE_NOWAY is defined. CONFIG_STRING(JitMeasureNowayAssertFile, W("JitMeasureNowayAssertFile")) // Set to file to write noway_assert usage to a file (if not // set: stdout). Only valid if MEASURE_NOWAY is defined. #if defined(DEBUG) CONFIG_INTEGER(EnableExtraSuperPmiQueries, W("EnableExtraSuperPmiQueries"), 0) // Make extra queries to somewhat // future-proof SuperPmi method contexts. 
#endif // DEBUG #if defined(DEBUG) || defined(INLINE_DATA) CONFIG_INTEGER(JitInlineDumpData, W("JitInlineDumpData"), 0) CONFIG_INTEGER(JitInlineDumpXml, W("JitInlineDumpXml"), 0) // 1 = full xml (+ failures in DEBUG) // 2 = only methods with inlines (+ failures in DEBUG) // 3 = only methods with inlines, no failures CONFIG_STRING(JitInlineDumpXmlFile, W("JitInlineDumpXmlFile")) CONFIG_INTEGER(JitInlinePolicyDumpXml, W("JitInlinePolicyDumpXml"), 0) CONFIG_INTEGER(JitInlineLimit, W("JitInlineLimit"), -1) CONFIG_INTEGER(JitInlinePolicyDiscretionary, W("JitInlinePolicyDiscretionary"), 0) CONFIG_INTEGER(JitInlinePolicyFull, W("JitInlinePolicyFull"), 0) CONFIG_INTEGER(JitInlinePolicySize, W("JitInlinePolicySize"), 0) CONFIG_INTEGER(JitInlinePolicyRandom, W("JitInlinePolicyRandom"), 0) // nonzero enables; value is the external random // seed CONFIG_INTEGER(JitInlinePolicyReplay, W("JitInlinePolicyReplay"), 0) CONFIG_STRING(JitNoInlineRange, W("JitNoInlineRange")) CONFIG_STRING(JitInlineReplayFile, W("JitInlineReplayFile")) #endif // defined(DEBUG) || defined(INLINE_DATA) // Extended version of DefaultPolicy that includes a more precise IL scan, // relies on PGO if it exists and generally is more aggressive. CONFIG_INTEGER(JitExtDefaultPolicy, W("JitExtDefaultPolicy"), 1) CONFIG_INTEGER(JitExtDefaultPolicyMaxIL, W("JitExtDefaultPolicyMaxIL"), 0x80) CONFIG_INTEGER(JitExtDefaultPolicyMaxILProf, W("JitExtDefaultPolicyMaxILProf"), 0x400) CONFIG_INTEGER(JitExtDefaultPolicyMaxBB, W("JitExtDefaultPolicyMaxBB"), 7) // Inliner uses the following formula for PGO-driven decisions: // // BM = BM * ((1.0 - ProfTrust) + ProfWeight * ProfScale) // // Where BM is a benefit multiplier composed from various observations (e.g. "const arg makes a branch foldable"). // If a profile data can be trusted for 100% we can safely just give up on inlining anything inside cold blocks // (except the cases where inlining in cold blocks improves type info/escape analysis for the whole caller). // For now, it's only applied for dynamic PGO. CONFIG_INTEGER(JitExtDefaultPolicyProfTrust, W("JitExtDefaultPolicyProfTrust"), 0x7) CONFIG_INTEGER(JitExtDefaultPolicyProfScale, W("JitExtDefaultPolicyProfScale"), 0x2A) CONFIG_INTEGER(JitInlinePolicyModel, W("JitInlinePolicyModel"), 0) CONFIG_INTEGER(JitInlinePolicyProfile, W("JitInlinePolicyProfile"), 0) CONFIG_INTEGER(JitInlinePolicyProfileThreshold, W("JitInlinePolicyProfileThreshold"), 40) CONFIG_INTEGER(JitObjectStackAllocation, W("JitObjectStackAllocation"), 0) CONFIG_INTEGER(JitEECallTimingInfo, W("JitEECallTimingInfo"), 0) #if defined(DEBUG) CONFIG_INTEGER(JitEnableFinallyCloning, W("JitEnableFinallyCloning"), 1) CONFIG_INTEGER(JitEnableRemoveEmptyTry, W("JitEnableRemoveEmptyTry"), 1) #endif // DEBUG // Overall master enable for Guarded Devirtualization. CONFIG_INTEGER(JitEnableGuardedDevirtualization, W("JitEnableGuardedDevirtualization"), 1) // Various policies for GuardedDevirtualization CONFIG_INTEGER(JitGuardedDevirtualizationChainLikelihood, W("JitGuardedDevirtualizationChainLikelihood"), 0x4B) // 75 CONFIG_INTEGER(JitGuardedDevirtualizationChainStatements, W("JitGuardedDevirtualizationChainStatements"), 4) #if defined(DEBUG) CONFIG_STRING(JitGuardedDevirtualizationRange, W("JitGuardedDevirtualizationRange")) CONFIG_INTEGER(JitRandomGuardedDevirtualization, W("JitRandomGuardedDevirtualization"), 0) #endif // DEBUG // Enable insertion of patchpoints into Tier0 methods with loops. 
CONFIG_INTEGER(TC_OnStackReplacement, W("TC_OnStackReplacement"), 0) // Initial patchpoint counter value used by jitted code CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, W("TC_OnStackReplacement_InitialCounter"), 1000) // Enable partial compilation for Tier0 methods CONFIG_INTEGER(TC_PartialCompilation, W("TC_PartialCompilation"), 0) #if defined(DEBUG) // Randomly sprinkle patchpoints. Value is the likelyhood any given stack-empty point becomes a patchpoint. CONFIG_INTEGER(JitRandomOnStackReplacement, W("JitRandomOnStackReplacement"), 0) // Place patchpoint at the specified IL offset, if possible. Overrides random placement. CONFIG_INTEGER(JitOffsetOnStackReplacement, W("JitOffsetOnStackReplacement"), -1) #endif // debug #if defined(DEBUG) // EnableOsrRange allows you to limit the set of methods that will rely on OSR to escape // from Tier0 code. Methods outside the range that would normally be jitted at Tier0 // and have patchpoints will instead be switched to optimized. CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange")) // EnablePatchpointRange allows you to limit the set of Tier0 methods that // will have patchpoints, and hence control which methods will create OSR methods. // Unlike EnableOsrRange, it will not alter the optimization setting for methods // outside the enabled range. CONFIG_STRING(JitEnablePatchpointRange, W("JitEnablePatchpointRange")) #endif // Profile instrumentation options CONFIG_INTEGER(JitMinimalJitProfiling, W("JitMinimalJitProfiling"), 1) CONFIG_INTEGER(JitMinimalPrejitProfiling, W("JitMinimalPrejitProfiling"), 0) CONFIG_INTEGER(JitCastProfiling, W("JitCastProfiling"), 0) // Profile castclass and isinst CONFIG_INTEGER(JitClassProfiling, W("JitClassProfiling"), 1) // Profile virtual and interface calls CONFIG_INTEGER(JitEdgeProfiling, W("JitEdgeProfiling"), 1) // Profile edges instead of blocks CONFIG_INTEGER(JitCollect64BitCounts, W("JitCollect64BitCounts"), 0) // Collect counts as 64-bit values. // Profile consumption options CONFIG_INTEGER(JitDisablePgo, W("JitDisablePgo"), 0) // Ignore pgo data for all methods #if defined(DEBUG) CONFIG_STRING(JitEnablePgoRange, W("JitEnablePgoRange")) // Enable pgo data for only some methods CONFIG_INTEGER(JitRandomEdgeCounts, W("JitRandomEdgeCounts"), 0) // Substitute random values for edge counts CONFIG_INTEGER(JitCrossCheckDevirtualizationAndPGO, W("JitCrossCheckDevirtualizationAndPGO"), 0) CONFIG_INTEGER(JitNoteFailedExactDevirtualization, W("JitNoteFailedExactDevirtualization"), 0) #endif // debug // Control when Virtual Calls are expanded CONFIG_INTEGER(JitExpandCallsEarly, W("JitExpandCallsEarly"), 1) // Expand Call targets early (in the global morph // phase) // Force the generation of CFG checks CONFIG_INTEGER(JitForceControlFlowGuard, W("JitForceControlFlowGuard"), 0); // JitCFGUseDispatcher values: // 0: Never use dispatcher // 1: Use dispatcher on all platforms that support it // 2: Default behavior, depends on platform (yes on x64, no on arm64) CONFIG_INTEGER(JitCFGUseDispatcher, W("JitCFGUseDispatcher"), 2) #if defined(DEBUG) // JitFunctionFile: Name of a file that contains a list of functions. If the currently compiled function is in the // file, certain other JIT config variables will be active. If the currently compiled function is not in the file, // the specific JIT config variables will not be active. 
// // Functions are approximately in the format output by JitFunctionTrace, e.g.: // // System.CLRConfig:GetBoolValue(ref,byref):bool (MethodHash=3c54d35e) // -- use the MethodHash, not the function name // // System.CLRConfig:GetBoolValue(ref,byref):bool // -- use just the name // // Lines with leading ";" "#" or "//" are ignored. // // If this is unset, then the JIT config values have their normal behavior. // CONFIG_STRING(JitFunctionFile, W("JitFunctionFile")) #endif // DEBUG #if defined(DEBUG) #if defined(TARGET_ARM64) // JitSaveFpLrWithCalleeSavedRegisters: // 0: use default frame type decision // 1: disable frames that save FP/LR registers with the callee-saved registers (at the top of the frame) // 2: force all frames to use the frame types that save FP/LR registers with the callee-saved registers (at the top // of the frame) CONFIG_INTEGER(JitSaveFpLrWithCalleeSavedRegisters, W("JitSaveFpLrWithCalleeSavedRegisters"), 0) #endif // defined(TARGET_ARM64) #endif // DEBUG CONFIG_INTEGER(JitEnregStructLocals, W("JitEnregStructLocals"), 1) // Allow to enregister locals with struct type. #undef CONFIG_INTEGER #undef CONFIG_STRING #undef CONFIG_METHODSET
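The inliner comments in the config listing above give the PGO damping formula BM = BM * ((1.0 - ProfTrust) + ProfWeight * ProfScale). The C++ sketch below only illustrates that arithmetic: it assumes ProfTrust and ProfScale have already been normalized to floating-point values (how the JIT derives them from the raw JitExtDefaultPolicyProfTrust / JitExtDefaultPolicyProfScale integers is not shown in this header), and the example inputs are made up.

#include <cstdio>

// Illustrative sketch only: the damping applied to the inliner's benefit
// multiplier, per the formula in the comments above. profWeight is the
// relative profile weight of the call site; profTrust and profScale are
// assumed to be already-normalized counterparts of the config knobs.
static double ApplyProfileDamping(double benefitMultiplier, double profWeight, double profTrust, double profScale)
{
    return benefitMultiplier * ((1.0 - profTrust) + profWeight * profScale);
}

int main()
{
    // Made-up numbers: with trust 0.7 and scale 2.5, a multiplier of 10.0
    // shrinks to 3.0 in a cold block (weight 0) and grows to 28.0 in a hot
    // block (weight 1).
    std::printf("cold: %.1f\n", ApplyProfileDamping(10.0, 0.0, 0.7, 2.5));
    std::printf("hot:  %.1f\n", ApplyProfileDamping(10.0, 1.0, 0.7, 2.5));
    return 0;
}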
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #if !defined(CONFIG_INTEGER) || !defined(CONFIG_STRING) || !defined(CONFIG_METHODSET) #error CONFIG_INTEGER, CONFIG_STRING, and CONFIG_METHODSET must be defined before including this file. #endif // !defined(CONFIG_INTEGER) || !defined(CONFIG_STRING) || !defined(CONFIG_METHODSET) #ifdef DEBUG #define OPT_CONFIG // Enable optimization level configuration. #endif #if defined(DEBUG) /// /// JIT /// CONFIG_INTEGER(AltJitLimit, W("AltJitLimit"), 0) // Max number of functions to use altjit for (decimal) CONFIG_INTEGER(AltJitSkipOnAssert, W("AltJitSkipOnAssert"), 0) // If AltJit hits an assert, fall back to the fallback // JIT. Useful in conjunction with // COMPlus_ContinueOnAssert=1 CONFIG_INTEGER(BreakOnDumpToken, W("BreakOnDumpToken"), 0xffffffff) // Breaks when using internal logging on a // particular token value. CONFIG_INTEGER(DebugBreakOnVerificationFailure, W("DebugBreakOnVerificationFailure"), 0) // Halts the jit on // verification failure CONFIG_INTEGER(DiffableDasm, W("JitDiffableDasm"), 0) // Make the disassembly diff-able CONFIG_INTEGER(JitDasmWithAddress, W("JitDasmWithAddress"), 0) // Print the process address next to each instruction of // the disassembly CONFIG_INTEGER(DisplayLoopHoistStats, W("JitLoopHoistStats"), 0) // Display JIT loop hoisting statistics CONFIG_INTEGER(DisplayLsraStats, W("JitLsraStats"), 0) // Display JIT Linear Scan Register Allocator statistics // If set to "1", display the stats in textual format. // If set to "2", display the stats in csv format. // If set to "3", display the stats in summarize format. // Recommended to use with JitStdOutFile flag. CONFIG_STRING(JitLsraOrdering, W("JitLsraOrdering")) // LSRA heuristics ordering CONFIG_INTEGER(DumpJittedMethods, W("DumpJittedMethods"), 0) // Prints all jitted methods to the console CONFIG_INTEGER(EnablePCRelAddr, W("JitEnablePCRelAddr"), 1) // Whether absolute addr be encoded as PC-rel offset by // RyuJIT where possible CONFIG_INTEGER(JitAssertOnMaxRAPasses, W("JitAssertOnMaxRAPasses"), 0) CONFIG_INTEGER(JitBreakEmitOutputInstr, W("JitBreakEmitOutputInstr"), -1) CONFIG_INTEGER(JitBreakMorphTree, W("JitBreakMorphTree"), 0xffffffff) CONFIG_INTEGER(JitBreakOnBadCode, W("JitBreakOnBadCode"), 0) CONFIG_INTEGER(JitBreakOnMinOpts, W("JITBreakOnMinOpts"), 0) // Halt if jit switches to MinOpts CONFIG_INTEGER(JitBreakOnUnsafeCode, W("JitBreakOnUnsafeCode"), 0) CONFIG_INTEGER(JitCloneLoops, W("JitCloneLoops"), 1) // If 0, don't clone. Otherwise clone loops for optimizations. CONFIG_INTEGER(JitDebugLogLoopCloning, W("JitDebugLogLoopCloning"), 0) // In debug builds log places where loop cloning // optimizations are performed on the fast path. CONFIG_INTEGER(JitDefaultFill, W("JitDefaultFill"), 0xdd) // In debug builds, initialize the memory allocated by the nra // with this byte. CONFIG_INTEGER(JitAlignLoopMinBlockWeight, W("JitAlignLoopMinBlockWeight"), DEFAULT_ALIGN_LOOP_MIN_BLOCK_WEIGHT) // Minimum weight needed for the first block of a loop to make it a // candidate for alignment. CONFIG_INTEGER(JitAlignLoopMaxCodeSize, W("JitAlignLoopMaxCodeSize"), DEFAULT_MAX_LOOPSIZE_FOR_ALIGN) // For non-adaptive alignment, minimum loop size (in bytes) for which // alignment will be done. // Defaults to 3 blocks of 32 bytes chunks = 96 bytes. 
CONFIG_INTEGER(JitAlignLoopBoundary, W("JitAlignLoopBoundary"), DEFAULT_ALIGN_LOOP_BOUNDARY) // For non-adaptive alignment, address boundary (power of 2) at which loop // alignment should be done. By default, 32B. CONFIG_INTEGER(JitAlignLoopForJcc, W("JitAlignLoopForJcc"), 0) // If set, for non-adaptive alignment, ensure loop jmps are not on or cross alignment boundary. CONFIG_INTEGER(JitAlignLoopAdaptive, W("JitAlignLoopAdaptive"), 1) // If set, perform adaptive loop alignment that limits number of padding based on loop size. CONFIG_INTEGER(JitHideAlignBehindJmp, W("JitHideAlignBehindJmp"), 1) // If set, try to hide align instruction (if any) behind an unconditional jump instruction (if any) // that is present before the loop start. // Print the alignment boundaries in disassembly. CONFIG_INTEGER(JitDasmWithAlignmentBoundaries, W("JitDasmWithAlignmentBoundaries"), 0) CONFIG_INTEGER(JitDirectAlloc, W("JitDirectAlloc"), 0) CONFIG_INTEGER(JitDoubleAlign, W("JitDoubleAlign"), 1) CONFIG_INTEGER(JitDumpASCII, W("JitDumpASCII"), 1) // Uses only ASCII characters in tree dumps CONFIG_INTEGER(JitDumpTerseLsra, W("JitDumpTerseLsra"), 1) // Produce terse dump output for LSRA CONFIG_INTEGER(JitDumpToDebugger, W("JitDumpToDebugger"), 0) // Output JitDump output to the debugger CONFIG_INTEGER(JitDumpVerboseSsa, W("JitDumpVerboseSsa"), 0) // Produce especially verbose dump output for SSA CONFIG_INTEGER(JitDumpVerboseTrees, W("JitDumpVerboseTrees"), 0) // Enable more verbose tree dumps CONFIG_INTEGER(JitEmitPrintRefRegs, W("JitEmitPrintRefRegs"), 0) CONFIG_INTEGER(JitEnableDevirtualization, W("JitEnableDevirtualization"), 1) // Enable devirtualization in importer CONFIG_INTEGER(JitEnableLateDevirtualization, W("JitEnableLateDevirtualization"), 1) // Enable devirtualization after // inlining CONFIG_INTEGER(JitExpensiveDebugCheckLevel, W("JitExpensiveDebugCheckLevel"), 0) // Level indicates how much checking // beyond the default to do in debug // builds (currently 1-2) CONFIG_INTEGER(JitForceFallback, W("JitForceFallback"), 0) // Set to non-zero to test NOWAY assert by forcing a retry CONFIG_INTEGER(JitFullyInt, W("JitFullyInt"), 0) // Forces Fully interruptible code CONFIG_INTEGER(JitFunctionTrace, W("JitFunctionTrace"), 0) // If non-zero, print JIT start/end logging CONFIG_INTEGER(JitGCChecks, W("JitGCChecks"), 0) CONFIG_INTEGER(JitGCInfoLogging, W("JitGCInfoLogging"), 0) // If true, prints GCInfo-related output to standard output. 
CONFIG_INTEGER(JitHashBreak, W("JitHashBreak"), -1) // Same as JitBreak, but for a method hash CONFIG_INTEGER(JitHashDump, W("JitHashDump"), -1) // Same as JitDump, but for a method hash CONFIG_INTEGER(JitHashHalt, W("JitHashHalt"), -1) // Same as JitHalt, but for a method hash CONFIG_INTEGER(JitInlineAdditionalMultiplier, W("JitInlineAdditionalMultiplier"), 0) CONFIG_INTEGER(JitInlinePrintStats, W("JitInlinePrintStats"), 0) CONFIG_INTEGER(JitInlineSize, W("JITInlineSize"), DEFAULT_MAX_INLINE_SIZE) CONFIG_INTEGER(JitInlineDepth, W("JITInlineDepth"), DEFAULT_MAX_INLINE_DEPTH) CONFIG_INTEGER(JitLongAddress, W("JitLongAddress"), 0) // Force using the large pseudo instruction form for long address CONFIG_INTEGER(JitMaxUncheckedOffset, W("JitMaxUncheckedOffset"), 8) CONFIG_INTEGER(JitMinOpts, W("JITMinOpts"), 0) // Forces MinOpts CONFIG_INTEGER(JitMinOptsBbCount, W("JITMinOptsBbCount"), DEFAULT_MIN_OPTS_BB_COUNT) // Internal jit control of MinOpts CONFIG_INTEGER(JitMinOptsCodeSize, W("JITMinOptsCodeSize"), DEFAULT_MIN_OPTS_CODE_SIZE) // Internal jit control of // MinOpts CONFIG_INTEGER(JitMinOptsInstrCount, W("JITMinOptsInstrCount"), DEFAULT_MIN_OPTS_INSTR_COUNT) // Internal jit control of // MinOpts CONFIG_INTEGER(JitMinOptsLvNumCount, W("JITMinOptsLvNumcount"), DEFAULT_MIN_OPTS_LV_NUM_COUNT) // Internal jit control // of MinOpts CONFIG_INTEGER(JitMinOptsLvRefCount, W("JITMinOptsLvRefcount"), DEFAULT_MIN_OPTS_LV_REF_COUNT) // Internal jit control // of MinOpts CONFIG_INTEGER(JitNoCSE, W("JitNoCSE"), 0) CONFIG_INTEGER(JitNoCSE2, W("JitNoCSE2"), 0) CONFIG_INTEGER(JitNoForceFallback, W("JitNoForceFallback"), 0) // Set to non-zero to prevent NOWAY assert testing. // Overrides COMPlus_JitForceFallback and JIT stress // flags. CONFIG_INTEGER(JitNoForwardSub, W("JitNoForwardSub"), 0) // Disables forward sub CONFIG_INTEGER(JitNoHoist, W("JitNoHoist"), 0) CONFIG_INTEGER(JitNoInline, W("JitNoInline"), 0) // Disables inlining of all methods CONFIG_INTEGER(JitNoMemoryBarriers, W("JitNoMemoryBarriers"), 0) // If 1, don't generate memory barriers CONFIG_INTEGER(JitNoRegLoc, W("JitNoRegLoc"), 0) CONFIG_INTEGER(JitNoStructPromotion, W("JitNoStructPromotion"), 0) // Disables struct promotion 1 - for all, 2 - for // params. CONFIG_INTEGER(JitNoUnroll, W("JitNoUnroll"), 0) CONFIG_INTEGER(JitOrder, W("JitOrder"), 0) CONFIG_INTEGER(JitQueryCurrentStaticFieldClass, W("JitQueryCurrentStaticFieldClass"), 1) CONFIG_INTEGER(JitReportFastTailCallDecisions, W("JitReportFastTailCallDecisions"), 0) CONFIG_INTEGER(JitPInvokeCheckEnabled, W("JITPInvokeCheckEnabled"), 0) CONFIG_INTEGER(JitPInvokeEnabled, W("JITPInvokeEnabled"), 1) // Controls verbosity for JitPrintInlinedMethods. Ignored for JitDump/NgenDump where // it's always set. 
CONFIG_INTEGER(JitPrintInlinedMethodsVerbose, W("JitPrintInlinedMethodsVerboseLevel"), 0) // Prints a tree of inlinees for a specific method (use '*' for all methods) CONFIG_METHODSET(JitPrintInlinedMethods, W("JitPrintInlinedMethods")) CONFIG_METHODSET(JitPrintDevirtualizedMethods, W("JitPrintDevirtualizedMethods")) CONFIG_INTEGER(JitProfileChecks, W("JitProfileChecks"), 0) // 1 enable in dumps, 2 assert if issues found CONFIG_INTEGER(JitRequired, W("JITRequired"), -1) CONFIG_INTEGER(JitRoundFloat, W("JITRoundFloat"), DEFAULT_ROUND_LEVEL) CONFIG_INTEGER(JitStackAllocToLocalSize, W("JitStackAllocToLocalSize"), DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE) CONFIG_INTEGER(JitSkipArrayBoundCheck, W("JitSkipArrayBoundCheck"), 0) CONFIG_INTEGER(JitSlowDebugChecksEnabled, W("JitSlowDebugChecksEnabled"), 1) // Turn on slow debug checks CONFIG_INTEGER(JitSplitFunctionSize, W("JitSplitFunctionSize"), 0) // On ARM, use this as the maximum function/funclet // size for creating function fragments (and creating // multiple RUNTIME_FUNCTION entries) CONFIG_INTEGER(JitSsaStress, W("JitSsaStress"), 0) // Perturb order of processing of blocks in SSA; 0 = no stress; 1 = // use method hash; * = supplied value as random hash CONFIG_INTEGER(JitStackChecks, W("JitStackChecks"), 0) CONFIG_STRING(JitStdOutFile, W("JitStdOutFile")) // If set, sends JIT's stdout output to this file. CONFIG_INTEGER(JitStress, W("JitStress"), 0) // Internal Jit stress mode: 0 = no stress, 2 = all stress, other = vary // stress based on a hash of the method and this value CONFIG_INTEGER(JitStressBBProf, W("JitStressBBProf"), 0) // Internal Jit stress mode CONFIG_INTEGER(JitStressBiasedCSE, W("JitStressBiasedCSE"), 0x101) // Internal Jit stress mode: decimal bias value // between (0,100) to perform CSE on a candidate. // 100% = All CSEs. 0% = 0 CSE. (> 100) means no // stress. CONFIG_INTEGER(JitStressModeNamesOnly, W("JitStressModeNamesOnly"), 0) // Internal Jit stress: if nonzero, only enable // stress modes listed in JitStressModeNames CONFIG_INTEGER(JitStressRegs, W("JitStressRegs"), 0) CONFIG_INTEGER(JitVNMapSelLimit, W("JitVNMapSelLimit"), 0) // If non-zero, assert if # of VNF_MapSelect applications // considered reaches this CONFIG_INTEGER(NgenHashDump, W("NgenHashDump"), -1) // same as JitHashDump, but for ngen CONFIG_INTEGER(NgenOrder, W("NgenOrder"), 0) CONFIG_INTEGER(RunAltJitCode, W("RunAltJitCode"), 1) // If non-zero, and the compilation succeeds for an AltJit, then // use the code. If zero, then we always throw away the generated // code and fall back to the default compiler. CONFIG_INTEGER(RunComponentUnitTests, W("JitComponentUnitTests"), 0) // Run JIT component unit tests CONFIG_INTEGER(ShouldInjectFault, W("InjectFault"), 0) CONFIG_INTEGER(StressCOMCall, W("StressCOMCall"), 0) CONFIG_INTEGER(TailcallStress, W("TailcallStress"), 0) CONFIG_INTEGER(TreesBeforeAfterMorph, W("JitDumpBeforeAfterMorph"), 0) // If 1, display each tree before/after morphing CONFIG_METHODSET(JitBreak, W("JitBreak")) // Stops in the importer when compiling a specified method CONFIG_METHODSET(JitDebugBreak, W("JitDebugBreak")) CONFIG_METHODSET(JitDisasm, W("JitDisasm")) // Dumps disassembly for specified method CONFIG_STRING(JitDisasmAssemblies, W("JitDisasmAssemblies")) // Only show JitDisasm and related info for methods // from this semicolon-delimited list of assemblies. CONFIG_INTEGER(JitDisasmWithGC, W("JitDisasmWithGC"), 0) // Dump interleaved GC Info for any method disassembled. 
CONFIG_METHODSET(JitDump, W("JitDump")) // Dumps trees for specified method CONFIG_INTEGER(JitDumpTier0, W("JitDumpTier0"), 1) // Dump tier0 requests CONFIG_INTEGER(JitDumpAtOSROffset, W("JitDumpAtOSROffset"), -1) // Only dump OSR requests for this offset CONFIG_INTEGER(JitDumpInlinePhases, W("JitDumpInlinePhases"), 1) // Dump inline compiler phases CONFIG_METHODSET(JitEHDump, W("JitEHDump")) // Dump the EH table for the method, as reported to the VM CONFIG_METHODSET(JitExclude, W("JitExclude")) CONFIG_METHODSET(JitForceProcedureSplitting, W("JitForceProcedureSplitting")) CONFIG_METHODSET(JitGCDump, W("JitGCDump")) CONFIG_METHODSET(JitDebugDump, W("JitDebugDump")) CONFIG_METHODSET(JitHalt, W("JitHalt")) // Emits break instruction into jitted code CONFIG_METHODSET(JitImportBreak, W("JitImportBreak")) CONFIG_METHODSET(JitInclude, W("JitInclude")) CONFIG_METHODSET(JitLateDisasm, W("JitLateDisasm")) CONFIG_METHODSET(JitMinOptsName, W("JITMinOptsName")) // Forces MinOpts for a named function CONFIG_METHODSET(JitNoProcedureSplitting, W("JitNoProcedureSplitting")) // Disallow procedure splitting for specified // methods CONFIG_METHODSET(JitNoProcedureSplittingEH, W("JitNoProcedureSplittingEH")) // Disallow procedure splitting for // specified methods if they contain // exception handling CONFIG_METHODSET(JitStressOnly, W("JitStressOnly")) // Internal Jit stress mode: stress only the specified method(s) CONFIG_METHODSET(JitUnwindDump, W("JitUnwindDump")) // Dump the unwind codes for the method /// /// NGEN /// CONFIG_METHODSET(NgenDisasm, W("NgenDisasm")) // Same as JitDisasm, but for ngen CONFIG_METHODSET(NgenDump, W("NgenDump")) // Same as JitDump, but for ngen CONFIG_METHODSET(NgenEHDump, W("NgenEHDump")) // Dump the EH table for the method, as reported to the VM CONFIG_METHODSET(NgenGCDump, W("NgenGCDump")) CONFIG_METHODSET(NgenDebugDump, W("NgenDebugDump")) CONFIG_METHODSET(NgenUnwindDump, W("NgenUnwindDump")) // Dump the unwind codes for the method /// /// JIT /// CONFIG_METHODSET(JitDumpFg, W("JitDumpFg")) // Dumps Xml/Dot Flowgraph for specified method CONFIG_STRING(JitDumpFgDir, W("JitDumpFgDir")) // Directory for Xml/Dot flowgraph dump(s) CONFIG_STRING(JitDumpFgFile, W("JitDumpFgFile")) // Filename for Xml/Dot flowgraph dump(s) (default: "default") CONFIG_STRING(JitDumpFgPhase, W("JitDumpFgPhase")) // Phase-based Xml/Dot flowgraph support. Set to the short name of a // phase to see the flowgraph after that phase. Leave unset to dump // after COLD-BLK (determine first cold block) or set to * for all // phases CONFIG_STRING(JitDumpFgPrePhase, W("JitDumpFgPrePhase")) // Same as JitDumpFgPhase, but specifies to dump pre-phase, not post-phase. 
CONFIG_INTEGER(JitDumpFgDot, W("JitDumpFgDot"), 1) // 0 == dump XML format; non-zero == dump DOT format CONFIG_INTEGER(JitDumpFgEH, W("JitDumpFgEH"), 0) // 0 == no EH regions; non-zero == include EH regions CONFIG_INTEGER(JitDumpFgLoops, W("JitDumpFgLoops"), 0) // 0 == no loop regions; non-zero == include loop regions CONFIG_INTEGER(JitDumpFgConstrained, W("JitDumpFgConstrained"), 1) // 0 == don't constrain to mostly linear layout; // non-zero == force mostly lexical block // linear layout CONFIG_INTEGER(JitDumpFgBlockID, W("JitDumpFgBlockID"), 0) // 0 == display block with bbNum; 1 == display with both // bbNum and bbID CONFIG_INTEGER(JitDumpFgBlockFlags, W("JitDumpFgBlockFlags"), 0) // 0 == don't display block flags; 1 == display flags CONFIG_INTEGER(JitDumpFgLoopFlags, W("JitDumpFgLoopFlags"), 0) // 0 == don't display loop flags; 1 == display flags CONFIG_STRING(JitDumpPreciseDebugInfoFile, W("JitDumpPreciseDebugInfoFile")) CONFIG_INTEGER(JitDisasmWithDebugInfo, W("JitDisasmWithDebugInfo"), 0) CONFIG_STRING(JitLateDisasmTo, W("JITLateDisasmTo")) CONFIG_STRING(JitRange, W("JitRange")) CONFIG_STRING(JitStressModeNames, W("JitStressModeNames")) // Internal Jit stress mode: stress using the given set of // stress mode names, e.g. STRESS_REGS, STRESS_TAILCALL CONFIG_STRING(JitStressModeNamesNot, W("JitStressModeNamesNot")) // Internal Jit stress mode: do NOT stress using the // given set of stress mode names, e.g. STRESS_REGS, // STRESS_TAILCALL CONFIG_STRING(JitStressRange, W("JitStressRange")) // Internal Jit stress mode /// /// NGEN /// CONFIG_METHODSET(NgenDumpFg, W("NgenDumpFg")) // Ngen Xml/Dot flowgraph dump support CONFIG_STRING(NgenDumpFgDir, W("NgenDumpFgDir")) // Ngen Xml/Dot flowgraph dump support CONFIG_STRING(NgenDumpFgFile, W("NgenDumpFgFile")) // Ngen Xml/Dot flowgraph dump support /// /// JIT Hardware Intrinsics /// CONFIG_INTEGER(EnableIncompleteISAClass, W("EnableIncompleteISAClass"), 0) // Enable testing not-yet-implemented // intrinsic classes #endif // defined(DEBUG) #if FEATURE_LOOP_ALIGN CONFIG_INTEGER(JitAlignLoops, W("JitAlignLoops"), 1) // If set, align inner loops #else CONFIG_INTEGER(JitAlignLoops, W("JitAlignLoops"), 0) #endif /// /// JIT /// #ifdef FEATURE_ENABLE_NO_RANGE_CHECKS CONFIG_INTEGER(JitNoRangeChks, W("JitNoRngChks"), 0) // If 1, don't generate range checks #endif // AltJitAssertOnNYI should be 0 on targets where JIT is under development or bring up stage, so as to facilitate // fallback to main JIT on hitting a NYI. #if defined(TARGET_ARM64) || defined(TARGET_X86) CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 0) // Controls the AltJit behavior of NYI stuff #else // !defined(TARGET_ARM64) && !defined(TARGET_X86) CONFIG_INTEGER(AltJitAssertOnNYI, W("AltJitAssertOnNYI"), 1) // Controls the AltJit behavior of NYI stuff #endif // defined(TARGET_ARM64) || defined(TARGET_X86) CONFIG_INTEGER(EnableEHWriteThru, W("EnableEHWriteThru"), 1) // Enable the register allocator to support EH-write thru: // partial enregistration of vars exposed on EH boundaries CONFIG_INTEGER(EnableMultiRegLocals, W("EnableMultiRegLocals"), 1) // Enable the enregistration of locals that are // defined or used in a multireg context. 
// clang-format on #ifdef FEATURE_SIMD CONFIG_INTEGER(JitDisableSimdVN, W("JitDisableSimdVN"), 0) // Default 0, ValueNumbering of SIMD nodes and HW Intrinsic // nodes enabled // If 1, then disable ValueNumbering of SIMD nodes // If 2, then disable ValueNumbering of HW Intrinsic nodes // If 3, disable both SIMD and HW Intrinsic nodes #endif // FEATURE_SIMD // Default 0, enable the CSE of Constants, including nearby offsets. (only for ARM64) // If 1, disable all the CSE of Constants // If 2, enable the CSE of Constants but don't combine with nearby offsets. (only for ARM64) // If 3, enable the CSE of Constants including nearby offsets. (all platforms) // If 4, enable the CSE of Constants but don't combine with nearby offsets. (all platforms) // CONFIG_INTEGER(JitConstCSE, W("JitConstCSE"), 0) #define CONST_CSE_ENABLE_ARM64 0 #define CONST_CSE_DISABLE_ALL 1 #define CONST_CSE_ENABLE_ARM64_NO_SHARING 2 #define CONST_CSE_ENABLE_ALL 3 #define CONST_CSE_ENABLE_ALL_NO_SHARING 4 /// /// JIT /// #if !defined(DEBUG) && !defined(_DEBUG) CONFIG_INTEGER(JitEnableNoWayAssert, W("JitEnableNoWayAssert"), 0) #else // defined(DEBUG) || defined(_DEBUG) CONFIG_INTEGER(JitEnableNoWayAssert, W("JitEnableNoWayAssert"), 1) #endif // !defined(DEBUG) && !defined(_DEBUG) #if defined(TARGET_AMD64) || defined(TARGET_X86) #define JitMinOptsTrackGCrefs_Default 0 // Not tracking GC refs in MinOpts is new behavior #else #define JitMinOptsTrackGCrefs_Default 1 #endif CONFIG_INTEGER(JitMinOptsTrackGCrefs, W("JitMinOptsTrackGCrefs"), JitMinOptsTrackGCrefs_Default) // Track GC roots // The following should be wrapped inside "#if MEASURE_MEM_ALLOC / #endif", but // some files include this one without bringing in the definitions from "jit.h" // so we don't always know what the "true" value of that flag should be. For now // we take the easy way out and always include the flag, even in release builds // (normally MEASURE_MEM_ALLOC is off for release builds but if it's toggled on // for release in "jit.h" the flag would be missing for some includers). // TODO-Cleanup: need to make 'MEASURE_MEM_ALLOC' well-defined here at all times. CONFIG_INTEGER(DisplayMemStats, W("JitMemStats"), 0) // Display JIT memory usage statistics #if defined(DEBUG) CONFIG_INTEGER(JitEnregStats, W("JitEnregStats"), 0) // Display JIT enregistration statistics #endif // DEBUG CONFIG_INTEGER(JitAggressiveInlining, W("JitAggressiveInlining"), 0) // Aggressive inlining of all methods CONFIG_INTEGER(JitELTHookEnabled, W("JitELTHookEnabled"), 0) // If 1, emit Enter/Leave/TailCall callbacks CONFIG_INTEGER(JitInlineSIMDMultiplier, W("JitInlineSIMDMultiplier"), 3) // Ex lclMAX_TRACKED constant. 
CONFIG_INTEGER(JitMaxLocalsToTrack, W("JitMaxLocalsToTrack"), 0x400) #if defined(FEATURE_ENABLE_NO_RANGE_CHECKS) CONFIG_INTEGER(JitNoRngChks, W("JitNoRngChks"), 0) // If 1, don't generate range checks #endif // defined(FEATURE_ENABLE_NO_RANGE_CHECKS) #if defined(OPT_CONFIG) CONFIG_INTEGER(JitDoAssertionProp, W("JitDoAssertionProp"), 1) // Perform assertion propagation optimization CONFIG_INTEGER(JitDoCopyProp, W("JitDoCopyProp"), 1) // Perform copy propagation on variables that appear redundant CONFIG_INTEGER(JitDoEarlyProp, W("JitDoEarlyProp"), 1) // Perform Early Value Propagation CONFIG_INTEGER(JitDoLoopHoisting, W("JitDoLoopHoisting"), 1) // Perform loop hoisting on loop invariant values CONFIG_INTEGER(JitDoLoopInversion, W("JitDoLoopInversion"), 1) // Perform loop inversion on "for/while" loops CONFIG_INTEGER(JitDoRangeAnalysis, W("JitDoRangeAnalysis"), 1) // Perform range check analysis CONFIG_INTEGER(JitDoRedundantBranchOpts, W("JitDoRedundantBranchOpts"), 1) // Perform redundant branch optimizations CONFIG_INTEGER(JitDoSsa, W("JitDoSsa"), 1) // Perform Static Single Assignment (SSA) numbering on the variables CONFIG_INTEGER(JitDoValueNumber, W("JitDoValueNumber"), 1) // Perform value numbering on method expressions CONFIG_METHODSET(JitOptRepeat, W("JitOptRepeat")) // Runs optimizer multiple times on the method CONFIG_INTEGER(JitOptRepeatCount, W("JitOptRepeatCount"), 2) // Number of times to repeat opts when repeating #endif // defined(OPT_CONFIG) CONFIG_INTEGER(JitTelemetry, W("JitTelemetry"), 1) // If non-zero, gather JIT telemetry data // Max # of MapSelect's considered for a particular top-level invocation. CONFIG_INTEGER(JitVNMapSelBudget, W("JitVNMapSelBudget"), DEFAULT_MAP_SELECT_BUDGET) CONFIG_INTEGER(TailCallLoopOpt, W("TailCallLoopOpt"), 1) // Convert recursive tail calls to loops CONFIG_METHODSET(AltJit, W("AltJit")) // Enables AltJit and selectively limits it to the specified methods. CONFIG_METHODSET(AltJitNgen, W("AltJitNgen")) // Enables AltJit for NGEN and selectively limits it // to the specified methods. CONFIG_STRING(AltJitExcludeAssemblies, W("AltJitExcludeAssemblies")) // Do not use AltJit on this // semicolon-delimited list of assemblies. CONFIG_INTEGER(JitMeasureIR, W("JitMeasureIR"), 0) // If set, measure the IR size after some phases and report it in // the time log. CONFIG_STRING(JitFuncInfoFile, W("JitFuncInfoLogFile")) // If set, gather JIT function info and write to this file. CONFIG_STRING(JitTimeLogCsv, W("JitTimeLogCsv")) // If set, gather JIT throughput data and write to a CSV file. This // mode must be used in internal retail builds. CONFIG_STRING(TailCallOpt, W("TailCallOpt")) CONFIG_INTEGER(FastTailCalls, W("FastTailCalls"), 1) // If set, allow fast tail calls; otherwise allow only helper-based // calls // for explicit tail calls. CONFIG_INTEGER(JitMeasureNowayAssert, W("JitMeasureNowayAssert"), 0) // Set to 1 to measure noway_assert usage. Only // valid if MEASURE_NOWAY is defined. CONFIG_STRING(JitMeasureNowayAssertFile, W("JitMeasureNowayAssertFile")) // Set to file to write noway_assert usage to a file (if not // set: stdout). Only valid if MEASURE_NOWAY is defined. #if defined(DEBUG) CONFIG_INTEGER(EnableExtraSuperPmiQueries, W("EnableExtraSuperPmiQueries"), 0) // Make extra queries to somewhat // future-proof SuperPmi method contexts. 
#endif // DEBUG #if defined(DEBUG) || defined(INLINE_DATA) CONFIG_INTEGER(JitInlineDumpData, W("JitInlineDumpData"), 0) CONFIG_INTEGER(JitInlineDumpXml, W("JitInlineDumpXml"), 0) // 1 = full xml (+ failures in DEBUG) // 2 = only methods with inlines (+ failures in DEBUG) // 3 = only methods with inlines, no failures CONFIG_STRING(JitInlineDumpXmlFile, W("JitInlineDumpXmlFile")) CONFIG_INTEGER(JitInlinePolicyDumpXml, W("JitInlinePolicyDumpXml"), 0) CONFIG_INTEGER(JitInlineLimit, W("JitInlineLimit"), -1) CONFIG_INTEGER(JitInlinePolicyDiscretionary, W("JitInlinePolicyDiscretionary"), 0) CONFIG_INTEGER(JitInlinePolicyFull, W("JitInlinePolicyFull"), 0) CONFIG_INTEGER(JitInlinePolicySize, W("JitInlinePolicySize"), 0) CONFIG_INTEGER(JitInlinePolicyRandom, W("JitInlinePolicyRandom"), 0) // nonzero enables; value is the external random // seed CONFIG_INTEGER(JitInlinePolicyReplay, W("JitInlinePolicyReplay"), 0) CONFIG_STRING(JitNoInlineRange, W("JitNoInlineRange")) CONFIG_STRING(JitInlineReplayFile, W("JitInlineReplayFile")) #endif // defined(DEBUG) || defined(INLINE_DATA) // Extended version of DefaultPolicy that includes a more precise IL scan, // relies on PGO if it exists and generally is more aggressive. CONFIG_INTEGER(JitExtDefaultPolicy, W("JitExtDefaultPolicy"), 1) CONFIG_INTEGER(JitExtDefaultPolicyMaxIL, W("JitExtDefaultPolicyMaxIL"), 0x80) CONFIG_INTEGER(JitExtDefaultPolicyMaxILProf, W("JitExtDefaultPolicyMaxILProf"), 0x400) CONFIG_INTEGER(JitExtDefaultPolicyMaxBB, W("JitExtDefaultPolicyMaxBB"), 7) // Inliner uses the following formula for PGO-driven decisions: // // BM = BM * ((1.0 - ProfTrust) + ProfWeight * ProfScale) // // Where BM is a benefit multiplier composed from various observations (e.g. "const arg makes a branch foldable"). // If a profile data can be trusted for 100% we can safely just give up on inlining anything inside cold blocks // (except the cases where inlining in cold blocks improves type info/escape analysis for the whole caller). // For now, it's only applied for dynamic PGO. CONFIG_INTEGER(JitExtDefaultPolicyProfTrust, W("JitExtDefaultPolicyProfTrust"), 0x7) CONFIG_INTEGER(JitExtDefaultPolicyProfScale, W("JitExtDefaultPolicyProfScale"), 0x2A) CONFIG_INTEGER(JitInlinePolicyModel, W("JitInlinePolicyModel"), 0) CONFIG_INTEGER(JitInlinePolicyProfile, W("JitInlinePolicyProfile"), 0) CONFIG_INTEGER(JitInlinePolicyProfileThreshold, W("JitInlinePolicyProfileThreshold"), 40) CONFIG_INTEGER(JitObjectStackAllocation, W("JitObjectStackAllocation"), 0) CONFIG_INTEGER(JitEECallTimingInfo, W("JitEECallTimingInfo"), 0) #if defined(DEBUG) CONFIG_INTEGER(JitEnableFinallyCloning, W("JitEnableFinallyCloning"), 1) CONFIG_INTEGER(JitEnableRemoveEmptyTry, W("JitEnableRemoveEmptyTry"), 1) #endif // DEBUG // Overall master enable for Guarded Devirtualization. CONFIG_INTEGER(JitEnableGuardedDevirtualization, W("JitEnableGuardedDevirtualization"), 1) // Various policies for GuardedDevirtualization CONFIG_INTEGER(JitGuardedDevirtualizationChainLikelihood, W("JitGuardedDevirtualizationChainLikelihood"), 0x4B) // 75 CONFIG_INTEGER(JitGuardedDevirtualizationChainStatements, W("JitGuardedDevirtualizationChainStatements"), 4) #if defined(DEBUG) CONFIG_STRING(JitGuardedDevirtualizationRange, W("JitGuardedDevirtualizationRange")) CONFIG_INTEGER(JitRandomGuardedDevirtualization, W("JitRandomGuardedDevirtualization"), 0) #endif // DEBUG // Enable insertion of patchpoints into Tier0 methods with loops. 
CONFIG_INTEGER(TC_OnStackReplacement, W("TC_OnStackReplacement"), 0) // Initial patchpoint counter value used by jitted code CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, W("TC_OnStackReplacement_InitialCounter"), 1000) // Enable partial compilation for Tier0 methods CONFIG_INTEGER(TC_PartialCompilation, W("TC_PartialCompilation"), 0) // Patchpoint strategy: // 0 - backedge sources // 1 - backedge targets // 2 - adaptive (default) CONFIG_INTEGER(TC_PatchpointStrategy, W("TC_PatchpointStrategy"), 2) #if defined(DEBUG) // Randomly sprinkle patchpoints. Value is the likelyhood any given stack-empty point becomes a patchpoint. CONFIG_INTEGER(JitRandomOnStackReplacement, W("JitRandomOnStackReplacement"), 0) // Place patchpoint at the specified IL offset, if possible. Overrides random placement. CONFIG_INTEGER(JitOffsetOnStackReplacement, W("JitOffsetOnStackReplacement"), -1) #endif // debug #if defined(DEBUG) // EnableOsrRange allows you to limit the set of methods that will rely on OSR to escape // from Tier0 code. Methods outside the range that would normally be jitted at Tier0 // and have patchpoints will instead be switched to optimized. CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange")) // EnablePatchpointRange allows you to limit the set of Tier0 methods that // will have patchpoints, and hence control which methods will create OSR methods. // Unlike EnableOsrRange, it will not alter the optimization setting for methods // outside the enabled range. CONFIG_STRING(JitEnablePatchpointRange, W("JitEnablePatchpointRange")) #endif // Profile instrumentation options CONFIG_INTEGER(JitMinimalJitProfiling, W("JitMinimalJitProfiling"), 1) CONFIG_INTEGER(JitMinimalPrejitProfiling, W("JitMinimalPrejitProfiling"), 0) CONFIG_INTEGER(JitCastProfiling, W("JitCastProfiling"), 0) // Profile castclass and isinst CONFIG_INTEGER(JitClassProfiling, W("JitClassProfiling"), 1) // Profile virtual and interface calls CONFIG_INTEGER(JitEdgeProfiling, W("JitEdgeProfiling"), 1) // Profile edges instead of blocks CONFIG_INTEGER(JitCollect64BitCounts, W("JitCollect64BitCounts"), 0) // Collect counts as 64-bit values. // Profile consumption options CONFIG_INTEGER(JitDisablePgo, W("JitDisablePgo"), 0) // Ignore pgo data for all methods #if defined(DEBUG) CONFIG_STRING(JitEnablePgoRange, W("JitEnablePgoRange")) // Enable pgo data for only some methods CONFIG_INTEGER(JitRandomEdgeCounts, W("JitRandomEdgeCounts"), 0) // Substitute random values for edge counts CONFIG_INTEGER(JitCrossCheckDevirtualizationAndPGO, W("JitCrossCheckDevirtualizationAndPGO"), 0) CONFIG_INTEGER(JitNoteFailedExactDevirtualization, W("JitNoteFailedExactDevirtualization"), 0) #endif // debug // Control when Virtual Calls are expanded CONFIG_INTEGER(JitExpandCallsEarly, W("JitExpandCallsEarly"), 1) // Expand Call targets early (in the global morph // phase) // Force the generation of CFG checks CONFIG_INTEGER(JitForceControlFlowGuard, W("JitForceControlFlowGuard"), 0); // JitCFGUseDispatcher values: // 0: Never use dispatcher // 1: Use dispatcher on all platforms that support it // 2: Default behavior, depends on platform (yes on x64, no on arm64) CONFIG_INTEGER(JitCFGUseDispatcher, W("JitCFGUseDispatcher"), 2) #if defined(DEBUG) // JitFunctionFile: Name of a file that contains a list of functions. If the currently compiled function is in the // file, certain other JIT config variables will be active. If the currently compiled function is not in the file, // the specific JIT config variables will not be active. 
// // Functions are approximately in the format output by JitFunctionTrace, e.g.: // // System.CLRConfig:GetBoolValue(ref,byref):bool (MethodHash=3c54d35e) // -- use the MethodHash, not the function name // // System.CLRConfig:GetBoolValue(ref,byref):bool // -- use just the name // // Lines with leading ";" "#" or "//" are ignored. // // If this is unset, then the JIT config values have their normal behavior. // CONFIG_STRING(JitFunctionFile, W("JitFunctionFile")) #endif // DEBUG #if defined(DEBUG) #if defined(TARGET_ARM64) // JitSaveFpLrWithCalleeSavedRegisters: // 0: use default frame type decision // 1: disable frames that save FP/LR registers with the callee-saved registers (at the top of the frame) // 2: force all frames to use the frame types that save FP/LR registers with the callee-saved registers (at the top // of the frame) CONFIG_INTEGER(JitSaveFpLrWithCalleeSavedRegisters, W("JitSaveFpLrWithCalleeSavedRegisters"), 0) #endif // defined(TARGET_ARM64) #endif // DEBUG CONFIG_INTEGER(JitEnregStructLocals, W("JitEnregStructLocals"), 1) // Allow to enregister locals with struct type. #undef CONFIG_INTEGER #undef CONFIG_STRING #undef CONFIG_METHODSET
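The #error guard at the top of this header and the #undef lines at its end show that it is an X-macro list: the includer defines CONFIG_INTEGER/CONFIG_STRING/CONFIG_METHODSET, includes the file, and the file cleans the macros up afterwards. Below is a minimal, self-contained sketch of that pattern; the struct, function, and entry names are illustrative and not the actual JIT code, which expands the real header.

#include <cstdio>

// Stand-in for the real config list: it only emits CONFIG_INTEGER invocations.
#define SAMPLE_CONFIG_VALUES                              \
    CONFIG_INTEGER(JitTelemetry, "JitTelemetry", 1)       \
    CONFIG_INTEGER(TailCallLoopOpt, "TailCallLoopOpt", 1)

// Expansion 1: declare one backing field per integer knob.
struct SampleJitConfig
{
#define CONFIG_INTEGER(name, key, defaultValue) int m_##name = defaultValue;
    SAMPLE_CONFIG_VALUES
#undef CONFIG_INTEGER
};

// Expansion 2: walk the same list again to print each knob's default.
static void DumpDefaults()
{
#define CONFIG_INTEGER(name, key, defaultValue) std::printf("%s defaults to %d\n", key, defaultValue);
    SAMPLE_CONFIG_VALUES
#undef CONFIG_INTEGER
}

int main()
{
    SampleJitConfig config;
    DumpDefaults();
    return (config.m_JitTelemetry == 1) ? 0 : 1;
}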
1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
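This description is the motivation for the TC_PatchpointStrategy knob documented earlier in this record (0 = backedge sources, 1 = backedge targets, 2 = adaptive). The sketch below only illustrates that three-way choice; the enum, function name, and backedge cutoff are assumptions, not the JIT's actual placement logic.

#include <cstdio>

// Illustrative sketch only -- not the JIT's real implementation.
enum class PatchpointStrategy
{
    BackedgeSources = 0, // place patchpoints where backedges originate
    BackedgeTargets = 1, // place patchpoints at loop heads (backedge targets)
    Adaptive        = 2, // pick based on the shape of the method (default)
};

static bool PlacePatchpointsAtBackedgeSources(PatchpointStrategy strategy, int backedgeCount)
{
    const int adaptiveBackedgeLimit = 8; // hypothetical cutoff; the real heuristic differs

    switch (strategy)
    {
        case PatchpointStrategy::BackedgeSources:
            return true;
        case PatchpointStrategy::BackedgeTargets:
            return false;
        case PatchpointStrategy::Adaptive:
        default:
            // Adaptive mode keys off the number of backedges; with too many,
            // fall back to placing patchpoints at the targets instead.
            return backedgeCount <= adaptiveBackedgeLimit;
    }
}

int main()
{
    // With few backedges, adaptive mode behaves like "backedge sources" here.
    std::printf("%d\n", PlacePatchpointsAtBackedgeSources(PatchpointStrategy::Adaptive, 3) ? 1 : 0);
    return 0;
}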
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
./src/coreclr/jit/jiteh.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Exception Handling XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX "EHblkDsc" functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ BasicBlock* EHblkDsc::BBFilterLast() { noway_assert(HasFilter()); noway_assert(ebdFilter != nullptr); noway_assert(ebdHndBeg != nullptr); // The last block of the filter is the block immediately preceding the first block of the handler. return ebdHndBeg->bbPrev; } BasicBlock* EHblkDsc::ExFlowBlock() { if (HasFilter()) { return ebdFilter; } else { return ebdHndBeg; } } bool EHblkDsc::InTryRegionILRange(BasicBlock* pBlk) { // BBF_INTERNAL blocks may not have a valid bbCodeOffs. This function // should only be used before any BBF_INTERNAL blocks have been added. assert(!(pBlk->bbFlags & BBF_INTERNAL)); return Compiler::jitIsBetween(pBlk->bbCodeOffs, ebdTryBegOffs(), ebdTryEndOffs()); } bool EHblkDsc::InFilterRegionILRange(BasicBlock* pBlk) { // BBF_INTERNAL blocks may not have a valid bbCodeOffs. This function // should only be used before any BBF_INTERNAL blocks have been added. assert(!(pBlk->bbFlags & BBF_INTERNAL)); return HasFilter() && Compiler::jitIsBetween(pBlk->bbCodeOffs, ebdFilterBegOffs(), ebdFilterEndOffs()); } bool EHblkDsc::InHndRegionILRange(BasicBlock* pBlk) { // BBF_INTERNAL blocks may not have a valid bbCodeOffs. This function // should only be used before any BBF_INTERNAL blocks have been added. assert(!(pBlk->bbFlags & BBF_INTERNAL)); return Compiler::jitIsBetween(pBlk->bbCodeOffs, ebdHndBegOffs(), ebdHndEndOffs()); } // HasCatchHandler: returns 'true' for either try/catch, or try/filter/filter-handler. bool EHblkDsc::HasCatchHandler() { return (ebdHandlerType == EH_HANDLER_CATCH) || (ebdHandlerType == EH_HANDLER_FILTER); } bool EHblkDsc::HasFilter() { return ebdHandlerType == EH_HANDLER_FILTER; } bool EHblkDsc::HasFinallyHandler() { return ebdHandlerType == EH_HANDLER_FINALLY; } bool EHblkDsc::HasFaultHandler() { return (ebdHandlerType == EH_HANDLER_FAULT) || (ebdHandlerType == EH_HANDLER_FAULT_WAS_FINALLY); } bool EHblkDsc::HasFinallyOrFaultHandler() { return HasFinallyHandler() || HasFaultHandler(); } /***************************************************************************** * Returns true if pBlk is a block in the range [pStart..pEnd). * The check is inclusive of pStart, exclusive of pEnd. 
*/ bool EHblkDsc::InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd) { for (BasicBlock* pWalk = pStart; pWalk != pEnd; pWalk = pWalk->bbNext) { if (pWalk == pBlk) { return true; } } return false; } bool EHblkDsc::InTryRegionBBRange(BasicBlock* pBlk) { return InBBRange(pBlk, ebdTryBeg, ebdTryLast->bbNext); } bool EHblkDsc::InFilterRegionBBRange(BasicBlock* pBlk) { return HasFilter() && InBBRange(pBlk, ebdFilter, ebdHndBeg); } bool EHblkDsc::InHndRegionBBRange(BasicBlock* pBlk) { return InBBRange(pBlk, ebdHndBeg, ebdHndLast->bbNext); } unsigned EHblkDsc::ebdGetEnclosingRegionIndex(bool* inTryRegion) { if ((ebdEnclosingTryIndex == NO_ENCLOSING_INDEX) && (ebdEnclosingHndIndex == NO_ENCLOSING_INDEX)) { return NO_ENCLOSING_INDEX; } else if (ebdEnclosingTryIndex == NO_ENCLOSING_INDEX) { assert(ebdEnclosingHndIndex != NO_ENCLOSING_INDEX); *inTryRegion = false; return ebdEnclosingHndIndex; } else if (ebdEnclosingHndIndex == NO_ENCLOSING_INDEX) { assert(ebdEnclosingTryIndex != NO_ENCLOSING_INDEX); *inTryRegion = true; return ebdEnclosingTryIndex; } else { assert(ebdEnclosingTryIndex != NO_ENCLOSING_INDEX); assert(ebdEnclosingHndIndex != NO_ENCLOSING_INDEX); assert(ebdEnclosingTryIndex != ebdEnclosingHndIndex); if (ebdEnclosingTryIndex < ebdEnclosingHndIndex) { *inTryRegion = true; return ebdEnclosingTryIndex; } else { *inTryRegion = false; return ebdEnclosingHndIndex; } } } /*****************************************************************************/ // We used to assert that the IL offsets in the EH table matched the IL offset stored // on the blocks pointed to by the try/filter/handler block pointers. This is true at // import time, but can fail to be true later in compilation when we start doing // flow optimizations. // // That being said, the IL offsets in the EH table should only be examined early, // during importing. After importing, use block info instead. 
IL_OFFSET EHblkDsc::ebdTryBegOffs() { return ebdTryBegOffset; } IL_OFFSET EHblkDsc::ebdTryEndOffs() { return ebdTryEndOffset; } IL_OFFSET EHblkDsc::ebdHndBegOffs() { return ebdHndBegOffset; } IL_OFFSET EHblkDsc::ebdHndEndOffs() { return ebdHndEndOffset; } IL_OFFSET EHblkDsc::ebdFilterBegOffs() { assert(HasFilter()); return ebdFilterBegOffset; } IL_OFFSET EHblkDsc::ebdFilterEndOffs() { assert(HasFilter()); return ebdHndBegOffs(); // end of filter is beginning of handler } /* static */ bool EHblkDsc::ebdIsSameILTry(EHblkDsc* h1, EHblkDsc* h2) { return ((h1->ebdTryBegOffset == h2->ebdTryBegOffset) && (h1->ebdTryEndOffset == h2->ebdTryEndOffset)); } /*****************************************************************************/ /* static */ bool EHblkDsc::ebdIsSameTry(EHblkDsc* h1, EHblkDsc* h2) { return ((h1->ebdTryBeg == h2->ebdTryBeg) && (h1->ebdTryLast == h2->ebdTryLast)); } bool EHblkDsc::ebdIsSameTry(Compiler* comp, unsigned t2) { EHblkDsc* h2 = comp->ehGetDsc(t2); return ebdIsSameTry(this, h2); } bool EHblkDsc::ebdIsSameTry(BasicBlock* ebdTryBeg, BasicBlock* ebdTryLast) { return ((this->ebdTryBeg == ebdTryBeg) && (this->ebdTryLast == ebdTryLast)); } /*****************************************************************************/ #ifdef DEBUG /*****************************************************************************/ void EHblkDsc::DispEntry(unsigned XTnum) { printf(" %2u ::", XTnum); #if !defined(FEATURE_EH_FUNCLETS) printf(" %2u ", XTnum, ebdHandlerNestingLevel); #endif // !FEATURE_EH_FUNCLETS if (ebdEnclosingTryIndex == NO_ENCLOSING_INDEX) { printf(" "); } else { printf(" %2u ", ebdEnclosingTryIndex); } if (ebdEnclosingHndIndex == NO_ENCLOSING_INDEX) { printf(" "); } else { printf(" %2u ", ebdEnclosingHndIndex); } ////////////// ////////////// Protected (try) region ////////////// printf("- Try at " FMT_BB ".." FMT_BB, ebdTryBeg->bbNum, ebdTryLast->bbNum); /* ( brace matching editor workaround to compensate for the following line */ printf(" [%03X..%03X), ", ebdTryBegOffset, ebdTryEndOffset); ////////////// ////////////// Filter region ////////////// if (HasFilter()) { /* ( brace matching editor workaround to compensate for the following line */ printf("Filter at " FMT_BB ".." FMT_BB " [%03X..%03X), ", ebdFilter->bbNum, BBFilterLast()->bbNum, ebdFilterBegOffset, ebdHndBegOffset); } ////////////// ////////////// Handler region ////////////// if (ebdHndBeg->bbCatchTyp == BBCT_FINALLY) { printf("Finally"); } else if (ebdHndBeg->bbCatchTyp == BBCT_FAULT) { printf("Fault "); } else { printf("Handler"); } printf(" at " FMT_BB ".." 
FMT_BB, ebdHndBeg->bbNum, ebdHndLast->bbNum); /* ( brace matching editor workaround to compensate for the following line */ printf(" [%03X..%03X)", ebdHndBegOffset, ebdHndEndOffset); printf("\n"); } /*****************************************************************************/ #endif // DEBUG /*****************************************************************************/ /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX "Compiler" functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ bool Compiler::bbInCatchHandlerILRange(BasicBlock* blk) { EHblkDsc* HBtab = ehGetBlockHndDsc(blk); if (HBtab == nullptr) { return false; } return HBtab->HasCatchHandler() && HBtab->InHndRegionILRange(blk); } bool Compiler::bbInFilterILRange(BasicBlock* blk) { EHblkDsc* HBtab = ehGetBlockHndDsc(blk); if (HBtab == nullptr) { return false; } return HBtab->InFilterRegionILRange(blk); } // Given a handler region, find the innermost try region that contains it. // NOTE: handlerIndex is 1-based (0 means no handler). unsigned short Compiler::bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex) { if (handlerIndex > 0) { unsigned XTnum; EHblkDsc* ehDsc; BasicBlock* blk = ehGetDsc(handlerIndex - 1)->ebdHndBeg; // handlerIndex is 1 based, therefore our interesting clauses start from clause compHndBBtab[handlerIndex] EHblkDsc* ehDscEnd = compHndBBtab + compHndBBtabCount; for (ehDsc = compHndBBtab + handlerIndex, XTnum = handlerIndex; ehDsc < ehDscEnd; ehDsc++, XTnum++) { if (bbInTryRegions(XTnum, blk)) { noway_assert(XTnum < MAX_XCPTN_INDEX); return (unsigned short)(XTnum + 1); // Return the tryIndex } } } return 0; } // Given a try region, find the innermost handler region that contains it. // NOTE: tryIndex is 1-based (0 means no handler). unsigned short Compiler::bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex) { if (tryIndex > 0) { unsigned XTnum; EHblkDsc* ehDsc; BasicBlock* blk = ehGetDsc(tryIndex - 1)->ebdTryBeg; // tryIndex is 1 based, our interesting clauses start from clause compHndBBtab[tryIndex] EHblkDsc* ehDscEnd = compHndBBtab + compHndBBtabCount; for (ehDsc = compHndBBtab + tryIndex, XTnum = tryIndex; ehDsc < ehDscEnd; ehDsc++, XTnum++) { if (bbInHandlerRegions(XTnum, blk)) { noway_assert(XTnum < MAX_XCPTN_INDEX); return (unsigned short)(XTnum + 1); // Return the handlerIndex } } } return 0; } /* Given a block and a try region index, check to see if the block is within the try body. For this check, a funclet is considered to be in the region it was extracted from. */ bool Compiler::bbInTryRegions(unsigned regionIndex, BasicBlock* blk) { assert(regionIndex < EHblkDsc::NO_ENCLOSING_INDEX); unsigned tryIndex = blk->hasTryIndex() ? blk->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX; // Loop outward until we find an enclosing try that is the same as the one // we are looking for or an outer/later one while (tryIndex < regionIndex) { tryIndex = ehGetEnclosingTryIndex(tryIndex); } // Now we have the index of 2 try bodies, either they match or not! return (tryIndex == regionIndex); } //------------------------------------------------------------------------ // bbInExnFlowRegions: // Check to see if an exception raised in the given block could be // handled by the given region (possibly after inner regions). 
// // Arguments: // regionIndex - Check if this region can handle exceptions from 'blk' // blk - Consider exceptions raised from this block // // Return Value: // true - The region with index 'regionIndex' can handle exceptions from 'blk' // false - The region with index 'regionIndex' can't handle exceptions from 'blk' // // Notes: // For this check, a funclet is considered to be in the region it was // extracted from. bool Compiler::bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk) { assert(regionIndex < EHblkDsc::NO_ENCLOSING_INDEX); EHblkDsc* ExnFlowRegion = ehGetBlockExnFlowDsc(blk); unsigned tryIndex = (ExnFlowRegion == nullptr ? EHblkDsc::NO_ENCLOSING_INDEX : ehGetIndex(ExnFlowRegion)); // Loop outward until we find an enclosing try that is the same as the one // we are looking for or an outer/later one while (tryIndex < regionIndex) { tryIndex = ehGetEnclosingTryIndex(tryIndex); } // Now we have the index of 2 try bodies, either they match or not! return (tryIndex == regionIndex); } /* Given a block, check to see if it is in the handler block of the EH descriptor. For this check, a funclet is considered to be in the region it was extracted from. */ bool Compiler::bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk) { assert(regionIndex < EHblkDsc::NO_ENCLOSING_INDEX); unsigned hndIndex = blk->hasHndIndex() ? blk->getHndIndex() : EHblkDsc::NO_ENCLOSING_INDEX; // We can't use the same simple trick here because there is no required ordering // of handlers (which also have no required ordering with respect to their try // bodies). while (hndIndex < EHblkDsc::NO_ENCLOSING_INDEX && hndIndex != regionIndex) { hndIndex = ehGetEnclosingHndIndex(hndIndex); } // Now we have the index of 2 try bodies, either they match or not! return (hndIndex == regionIndex); } /* Given a hndBlk, see if it is in one of tryBlk's catch handler regions. Since we create one EHblkDsc for each "catch" of a "try", we might end up with multiple EHblkDsc's that have the same ebdTryBeg and ebdTryLast, but different ebdHndBeg and ebdHndLast. Unfortunately getTryIndex() only returns the index of the first EHblkDsc. E.g. The following example shows that BB02 has a catch in BB03 and another catch in BB04. index nest, enclosing 0 :: 0, 1 - Try at BB01..BB02 [000..008], Handler at BB03 [009..016] 1 :: 0, - Try at BB01..BB02 [000..008], Handler at BB04 [017..022] This function will return true for bbInCatchHandlerRegions(BB02, BB03) and bbInCatchHandlerRegions(BB02, BB04) */ bool Compiler::bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk) { assert(tryBlk->hasTryIndex()); if (!hndBlk->hasHndIndex()) { return false; } unsigned XTnum = tryBlk->getTryIndex(); EHblkDsc* firstEHblkDsc = ehGetDsc(XTnum); EHblkDsc* ehDsc = firstEHblkDsc; // Rather than searching the whole list, take advantage of our sorting. // We will only match against blocks with the same try body (mutually // protect regions). Because of our sort ordering, such regions will // always be immediately adjacent, any nested regions will be before the // first of the set, and any outer regions will be after the last. // Also siblings will be before or after according to their location, // but never in between; while (XTnum > 0) { assert(EHblkDsc::ebdIsSameTry(firstEHblkDsc, ehDsc)); // Stop when the previous region is not mutually protect if (!EHblkDsc::ebdIsSameTry(firstEHblkDsc, ehDsc - 1)) { break; } ehDsc--; XTnum--; } // XTnum and ehDsc are now referring to the first region in the set of // mutually protect regions. 
assert(EHblkDsc::ebdIsSameTry(firstEHblkDsc, ehDsc)); assert((ehDsc == compHndBBtab) || !EHblkDsc::ebdIsSameTry(firstEHblkDsc, ehDsc - 1)); do { if (ehDsc->HasCatchHandler() && bbInHandlerRegions(XTnum, hndBlk)) { return true; } XTnum++; ehDsc++; } while (XTnum < compHndBBtabCount && EHblkDsc::ebdIsSameTry(firstEHblkDsc, ehDsc)); return false; } /****************************************************************************************** * Give two blocks, return the inner-most enclosing try region that contains both of them. * Return 0 if it does not find any try region (which means the inner-most region * is the method itself). */ unsigned short Compiler::bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo) { unsigned XTnum; for (XTnum = 0; XTnum < compHndBBtabCount; XTnum++) { if (bbInTryRegions(XTnum, bbOne) && bbInTryRegions(XTnum, bbTwo)) { noway_assert(XTnum < MAX_XCPTN_INDEX); return (unsigned short)(XTnum + 1); // Return the tryIndex } } return 0; } // bbIsTryBeg() returns true if this block is the start of any try region. // This is computed by examining the current values in the // EH table rather than just looking at the block->bbFlags. // // Note that a block is the beginning of any try region if it is the beginning of the // most nested try region it is a member of. Thus, we only need to check the EH // table entry related to the try index stored on the block. // bool Compiler::bbIsTryBeg(BasicBlock* block) { EHblkDsc* ehDsc = ehGetBlockTryDsc(block); return (ehDsc != nullptr) && (block == ehDsc->ebdTryBeg); } // bbIsHanderBeg() returns true if "block" is the start of any handler or filter. // Note that if a block is the beginning of a handler or filter, it must be the beginning // of the most nested handler or filter region it is in. Thus, we only need to look at the EH // descriptor corresponding to the handler index on the block. // bool Compiler::bbIsHandlerBeg(BasicBlock* block) { EHblkDsc* ehDsc = ehGetBlockHndDsc(block); return (ehDsc != nullptr) && ((block == ehDsc->ebdHndBeg) || (ehDsc->HasFilter() && (block == ehDsc->ebdFilter))); } bool Compiler::bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex) { if (block->hasHndIndex()) { *regionIndex = block->getHndIndex(); return block == ehGetDsc(*regionIndex)->ExFlowBlock(); } else { return false; } } bool Compiler::ehHasCallableHandlers() { #if defined(FEATURE_EH_FUNCLETS) // Any EH in the function? return compHndBBtabCount > 0; #else // !FEATURE_EH_FUNCLETS return ehNeedsShadowSPslots(); #endif // !FEATURE_EH_FUNCLETS } /****************************************************************************************** * Determine if 'block' is the last block of an EH 'try' or handler (ignoring filters). If so, * return the EH descriptor pointer for that EH region. Otherwise, return nullptr. 
*/ EHblkDsc* Compiler::ehIsBlockTryLast(BasicBlock* block) { EHblkDsc* HBtab = ehGetBlockTryDsc(block); if ((HBtab != nullptr) && (HBtab->ebdTryLast == block)) { return HBtab; } return nullptr; } EHblkDsc* Compiler::ehIsBlockHndLast(BasicBlock* block) { EHblkDsc* HBtab = ehGetBlockHndDsc(block); if ((HBtab != nullptr) && (HBtab->ebdHndLast == block)) { return HBtab; } return nullptr; } bool Compiler::ehIsBlockEHLast(BasicBlock* block) { return (ehIsBlockTryLast(block) != nullptr) || (ehIsBlockHndLast(block) != nullptr); } //------------------------------------------------------------------------ // ehGetBlockExnFlowDsc: // Get the EH descriptor for the most nested region (if any) that may // handle exceptions raised in the given block // // Arguments: // block - Consider exceptions raised from this block // // Return Value: // nullptr - The given block's exceptions propagate to caller // non-null - This region is the innermost handler for exceptions raised in // the given block EHblkDsc* Compiler::ehGetBlockExnFlowDsc(BasicBlock* block) { EHblkDsc* hndDesc = ehGetBlockHndDsc(block); if ((hndDesc != nullptr) && hndDesc->InFilterRegionBBRange(block)) { // If an exception is thrown in a filter (or escapes a callee in a filter), // or if exception_continue_search (0/false) is returned at // the end of a filter, the (original) exception is propagated to // the next outer handler. The "next outer handler" is the handler // of the try region enclosing the try that the filter protects. // This may not be the same as the try region enclosing the filter, // e.g. in cases like this: // try { // ... // } filter (filter-part) { // handler-part // } catch { (or finally/fault/filter) // which is represented as two EHblkDscs with the same try range, // the inner protected by a filter and the outer protected by the // other handler; exceptions in the filter-part propagate to the // other handler, even though the other handler's try region does not // enclose the filter. unsigned outerIndex = hndDesc->ebdEnclosingTryIndex; if (outerIndex == EHblkDsc::NO_ENCLOSING_INDEX) { assert(!block->hasTryIndex()); return nullptr; } return ehGetDsc(outerIndex); } return ehGetBlockTryDsc(block); } bool Compiler::ehBlockHasExnFlowDsc(BasicBlock* block) { if (block->hasTryIndex()) { return true; } EHblkDsc* hndDesc = ehGetBlockHndDsc(block); return ((hndDesc != nullptr) && hndDesc->InFilterRegionBBRange(block) && (hndDesc->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX)); } //------------------------------------------------------------------------ // ehGetMostNestedRegionIndex: Return the region index of the most nested EH region this block is in. // The return value is in the range [0..compHndBBtabCount]. It is same scale as bbTryIndex/bbHndIndex: // 0 means main method, N is used as an index to compHndBBtab[N - 1]. If we don't return 0, then // *inTryRegion indicates whether the most nested region for the block is a 'try' clause or // filter/handler clause. For 0 return, *inTryRegion is set to true. // // Arguments: // block - the BasicBlock we want the region index for. // inTryRegion - an out parameter. As described above. // // Return Value: // As described above. 
// unsigned Compiler::ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion) { assert(block != nullptr); assert(inTryRegion != nullptr); unsigned mostNestedRegion; if (block->bbHndIndex == 0) { mostNestedRegion = block->bbTryIndex; *inTryRegion = true; } else if (block->bbTryIndex == 0) { mostNestedRegion = block->bbHndIndex; *inTryRegion = false; } else { if (block->bbTryIndex < block->bbHndIndex) { mostNestedRegion = block->bbTryIndex; *inTryRegion = true; } else { assert(block->bbTryIndex != block->bbHndIndex); // A block can't be both in the 'try' and 'handler' region // of the same EH region mostNestedRegion = block->bbHndIndex; *inTryRegion = false; } } assert(mostNestedRegion <= compHndBBtabCount); return mostNestedRegion; } /***************************************************************************** * Returns the try index of the enclosing try, skipping all EH regions with the * same try region (that is, all 'mutual protect' regions). If there is no such * enclosing try, returns EHblkDsc::NO_ENCLOSING_INDEX. */ unsigned Compiler::ehTrueEnclosingTryIndexIL(unsigned regionIndex) { assert(regionIndex != EHblkDsc::NO_ENCLOSING_INDEX); EHblkDsc* ehDscRoot = ehGetDsc(regionIndex); EHblkDsc* HBtab = ehDscRoot; for (;;) { regionIndex = HBtab->ebdEnclosingTryIndex; if (regionIndex == EHblkDsc::NO_ENCLOSING_INDEX) { // No enclosing 'try'; we're done break; } HBtab = ehGetDsc(regionIndex); if (!EHblkDsc::ebdIsSameILTry(ehDscRoot, HBtab)) { // Found an enclosing 'try' that has a different 'try' region (is not mutually-protect with the // original region). Return it. break; } } return regionIndex; } unsigned Compiler::ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion) { assert(regionIndex != EHblkDsc::NO_ENCLOSING_INDEX); EHblkDsc* ehDsc = ehGetDsc(regionIndex); return ehDsc->ebdGetEnclosingRegionIndex(inTryRegion); } /***************************************************************************** * The argument 'block' has been deleted. Update the EH table so 'block' is no longer listed * as a 'last' block. You can't delete a 'begin' block this way. */ void Compiler::ehUpdateForDeletedBlock(BasicBlock* block) { assert(block->bbFlags & BBF_REMOVED); if (!block->hasTryIndex() && !block->hasHndIndex()) { // The block is not part of any EH region, so there is nothing to do. return; } BasicBlock* bPrev = block->bbPrev; assert(bPrev != nullptr); ehUpdateLastBlocks(block, bPrev); } /***************************************************************************** * Determine if an empty block can be deleted, and still preserve the EH normalization * rules on blocks. * * We only consider the case where the block to be deleted is the last block of a region, * and the region is being contracted such that the previous block will become the new * 'last' block. If this previous block is already a 'last' block, then we can't do the * delete, as that would cause a single block to be the 'last' block of multiple regions. */ bool Compiler::ehCanDeleteEmptyBlock(BasicBlock* block) { assert(block->isEmpty()); return true; #if 0 // This is disabled while the "multiple last block" normalization is disabled if (!fgNormalizeEHDone) { return true; } if (ehIsBlockEHLast(block)) { BasicBlock* bPrev = block->bbPrev; if ((bPrev != nullptr) && ehIsBlockEHLast(bPrev)) { return false; } } return true; #endif // 0 } /***************************************************************************** * The 'last' block of one or more EH regions might have changed. Update the EH table. 
* This can happen if the EH region shrinks, where one or more blocks have been removed * from the region. It can happen if the EH region grows, where one or more blocks * have been added at the end of the region. * * We might like to verify the handler table integrity after doing this update, but we * can't because this might just be one step by the caller in a transformation back to * a legal state. * * Arguments: * oldLast -- Search for this block as the 'last' block of one or more EH regions. * newLast -- If 'oldLast' is found to be the 'last' block of an EH region, replace it by 'newLast'. */ void Compiler::ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast) { for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->ebdTryLast == oldLast) { fgSetTryEnd(HBtab, newLast); } if (HBtab->ebdHndLast == oldLast) { fgSetHndEnd(HBtab, newLast); } } } unsigned Compiler::ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion) { assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) return ehGetDsc(finallyIndex)->ebdGetEnclosingRegionIndex(inTryRegion); #else *inTryRegion = true; return finallyIndex; #endif } void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk) { assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); assert(begBlk != nullptr); assert(endBlk != nullptr); #if FEATURE_EH_CALLFINALLY_THUNKS bool inTryRegion; unsigned callFinallyRegionIndex = ehGetCallFinallyRegionIndex(finallyIndex, &inTryRegion); if (callFinallyRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX) { *begBlk = fgFirstBB; *endBlk = fgEndBBAfterMainFunction(); } else { EHblkDsc* ehDsc = ehGetDsc(callFinallyRegionIndex); if (inTryRegion) { *begBlk = ehDsc->ebdTryBeg; *endBlk = ehDsc->ebdTryLast->bbNext; } else { *begBlk = ehDsc->ebdHndBeg; *endBlk = ehDsc->ebdHndLast->bbNext; } } #else // !FEATURE_EH_CALLFINALLY_THUNKS EHblkDsc* ehDsc = ehGetDsc(finallyIndex); *begBlk = ehDsc->ebdTryBeg; *endBlk = ehDsc->ebdTryLast->bbNext; #endif // !FEATURE_EH_CALLFINALLY_THUNKS } #ifdef DEBUG bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex) { assert(blockCallFinally->bbJumpKind == BBJ_CALLFINALLY); assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(finallyIndex < compHndBBtabCount); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); bool inTryRegion; unsigned callFinallyIndex = ehGetCallFinallyRegionIndex(finallyIndex, &inTryRegion); if (callFinallyIndex == EHblkDsc::NO_ENCLOSING_INDEX) { if (blockCallFinally->hasTryIndex() || blockCallFinally->hasHndIndex()) { // The BBJ_CALLFINALLY is supposed to be in the main function body, not in any EH region. return false; } else { return true; } } else { if (inTryRegion) { if (bbInTryRegions(callFinallyIndex, blockCallFinally)) { return true; } } else { if (bbInHandlerRegions(callFinallyIndex, blockCallFinally)) { return true; } } } return false; } #endif // DEBUG #if defined(FEATURE_EH_FUNCLETS) /***************************************************************************** * * Are there (or will there be) any funclets in the function? */ bool Compiler::ehAnyFunclets() { return compHndBBtabCount > 0; // if there is any EH, there will be funclets } /***************************************************************************** * * Count the number of EH funclets in the function. 
This will return the number * there will be after funclets have been created, but because it runs over the * EH table, it is accurate at any time. */ unsigned Compiler::ehFuncletCount() { unsigned funcletCnt = 0; for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->HasFilter()) { ++funcletCnt; } ++funcletCnt; } return funcletCnt; } /***************************************************************************** * * Get the index to use as the cache key for sharing throw blocks. * For non-funclet platforms, this is just the block's bbTryIndex, to ensure * that throw is protected by the correct set of trys. However, when we have * funclets we also have to ensure that the throw blocks are *not* shared * across funclets, so we use EHblkDsc index of either the funclet or * the containing try region, whichever is inner-most. We differentiate * between the 3 cases by setting the high bits (0 = try, 1 = handler, * 2 = filter) * */ unsigned Compiler::bbThrowIndex(BasicBlock* blk) { if (!blk->hasTryIndex() && !blk->hasHndIndex()) { return -1; } const unsigned tryIndex = blk->hasTryIndex() ? blk->getTryIndex() : USHRT_MAX; const unsigned hndIndex = blk->hasHndIndex() ? blk->getHndIndex() : USHRT_MAX; assert(tryIndex != hndIndex); assert(tryIndex != USHRT_MAX || hndIndex != USHRT_MAX); if (tryIndex < hndIndex) { // The most enclosing region is a try body, use it assert(tryIndex <= 0x3FFFFFFF); return tryIndex; } // The most enclosing region is a handler which will be a funclet // Now we have to figure out if blk is in the filter or handler assert(hndIndex <= 0x3FFFFFFF); if (ehGetDsc(hndIndex)->InFilterRegionBBRange(blk)) { return hndIndex | 0x40000000; } return hndIndex | 0x80000000; } #endif // FEATURE_EH_FUNCLETS /***************************************************************************** * Determine the emitter code cookie for a block, for unwind purposes. */ void* Compiler::ehEmitCookie(BasicBlock* block) { noway_assert(block); void* cookie; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (block->bbFlags & BBF_FINALLY_TARGET) { // Use the offset of the beginning of the NOP padding, not the main block. // This might include loop head padding, too, if this is a loop head. assert(block->bbUnwindNopEmitCookie); // probably not null-initialized, though, so this might not tell us // anything cookie = block->bbUnwindNopEmitCookie; } else #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) { cookie = block->bbEmitCookie; } noway_assert(cookie != nullptr); return cookie; } /***************************************************************************** * Determine the emitter code offset for a block. If the block is a finally * target, choose the offset of the NOP padding that precedes the block. 
*/ UNATIVE_OFFSET Compiler::ehCodeOffset(BasicBlock* block) { return GetEmitter()->emitCodeOffset(ehEmitCookie(block), 0); } /****************************************************************************/ EHblkDsc* Compiler::ehInitHndRange(BasicBlock* blk, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter) { EHblkDsc* hndTab = ehGetBlockHndDsc(blk); if (hndTab != nullptr) { if (hndTab->InFilterRegionILRange(blk)) { *hndBeg = hndTab->ebdFilterBegOffs(); *hndEnd = hndTab->ebdFilterEndOffs(); *inFilter = true; } else { *hndBeg = hndTab->ebdHndBegOffs(); *hndEnd = hndTab->ebdHndEndOffs(); *inFilter = false; } } else { *hndBeg = 0; *hndEnd = info.compILCodeSize; *inFilter = false; } return hndTab; } /****************************************************************************/ EHblkDsc* Compiler::ehInitTryRange(BasicBlock* blk, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd) { EHblkDsc* tryTab = ehGetBlockTryDsc(blk); if (tryTab != nullptr) { *tryBeg = tryTab->ebdTryBegOffs(); *tryEnd = tryTab->ebdTryEndOffs(); } else { *tryBeg = 0; *tryEnd = info.compILCodeSize; } return tryTab; } /****************************************************************************/ EHblkDsc* Compiler::ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter) { EHblkDsc* hndTab = ehGetBlockHndDsc(blk); if (hndTab != nullptr) { if (hndTab->InFilterRegionBBRange(blk)) { *hndBeg = hndTab->ebdFilter; if (hndLast != nullptr) { *hndLast = hndTab->BBFilterLast(); } *inFilter = true; } else { *hndBeg = hndTab->ebdHndBeg; if (hndLast != nullptr) { *hndLast = hndTab->ebdHndLast; } *inFilter = false; } } else { *hndBeg = nullptr; if (hndLast != nullptr) { *hndLast = nullptr; } *inFilter = false; } return hndTab; } /****************************************************************************/ EHblkDsc* Compiler::ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast) { EHblkDsc* tryTab = ehGetBlockTryDsc(blk); if (tryTab != nullptr) { *tryBeg = tryTab->ebdTryBeg; if (tryLast != nullptr) { *tryLast = tryTab->ebdTryLast; } } else { *tryBeg = nullptr; if (tryLast != nullptr) { *tryLast = nullptr; } } return tryTab; } /***************************************************************************** * This method updates the value of ebdTryBeg */ void Compiler::fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg) { assert(newTryBeg != nullptr); // Check if we are going to change the existing value of endTryLast // if (handlerTab->ebdTryBeg != newTryBeg) { // Update the EH table with the newTryLast block handlerTab->ebdTryBeg = newTryBeg; JITDUMP("EH#%u: New first block of try: " FMT_BB "\n", ehGetIndex(handlerTab), handlerTab->ebdTryBeg->bbNum); } } /***************************************************************************** * This method updates the value of ebdTryLast. */ void Compiler::fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast) { assert(newTryLast != nullptr); // // Check if we are going to change the existing value of endTryLast // if (handlerTab->ebdTryLast != newTryLast) { // Update the EH table with the newTryLast block handlerTab->ebdTryLast = newTryLast; #ifdef DEBUG if (verbose) { printf("EH#%u: New last block of try: " FMT_BB "\n", ehGetIndex(handlerTab), newTryLast->bbNum); } #endif // DEBUG } } /***************************************************************************** * * This method updates the value of ebdHndLast. 
*/ void Compiler::fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast) { assert(newHndLast != nullptr); // // Check if we are going to change the existing value of endHndLast // if (handlerTab->ebdHndLast != newHndLast) { // Update the EH table with the newHndLast block handlerTab->ebdHndLast = newHndLast; #ifdef DEBUG if (verbose) { printf("EH#%u: New last block of handler: " FMT_BB "\n", ehGetIndex(handlerTab), newHndLast->bbNum); } #endif // DEBUG } } /***************************************************************************** * * Given a EH handler table entry update the ebdTryLast and ebdHndLast pointers * to skip basic blocks that have been removed. They are set to the first * non-removed block after ebdTryBeg and ebdHndBeg, respectively. * * Note that removed blocks are not in the global list of blocks (no block in the * global list points to them). However, their pointers are still valid. We use * this fact when we walk lists of removed blocks until we find a non-removed * block, to be used for ending our iteration. */ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab) { BasicBlock* block; BasicBlock* bEnd; BasicBlock* bLast; // Update ebdTryLast bLast = nullptr; // Find the first non-removed block after the 'try' region to end our iteration. bEnd = handlerTab->ebdTryLast->bbNext; while ((bEnd != nullptr) && (bEnd->bbFlags & BBF_REMOVED)) { bEnd = bEnd->bbNext; } // Update bLast to account for any removed blocks block = handlerTab->ebdTryBeg; while (block != nullptr) { if ((block->bbFlags & BBF_REMOVED) == 0) { bLast = block; } block = block->bbNext; if (block == bEnd) { break; } } fgSetTryEnd(handlerTab, bLast); // Update ebdHndLast bLast = nullptr; // Find the first non-removed block after the handler region to end our iteration. bEnd = handlerTab->ebdHndLast->bbNext; while ((bEnd != nullptr) && (bEnd->bbFlags & BBF_REMOVED)) { bEnd = bEnd->bbNext; } // Update bLast to account for any removed blocks block = handlerTab->ebdHndBeg; while (block != nullptr) { if ((block->bbFlags & BBF_REMOVED) == 0) { bLast = block; } block = block->bbNext; if (block == bEnd) { break; } } fgSetHndEnd(handlerTab, bLast); } /***************************************************************************** * * Allocate the EH table */ void Compiler::fgAllocEHTable() { #if defined(FEATURE_EH_FUNCLETS) // We need to allocate space for EH clauses that will be used by funclets // as well as one for each EH clause from the IL. Nested EH clauses pulled // out as funclets create one EH clause for each enclosing region. Thus, // the maximum number of clauses we will need might be very large. We allocate // twice the number of EH clauses in the IL, which should be good in practice. // In extreme cases, we might need to abandon this and reallocate. See // fgAddEHTableEntry() for more details. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG compHndBBtabAllocCount = info.compXcptnsCount; // force the resizing code to hit more frequently in DEBUG #else // DEBUG compHndBBtabAllocCount = info.compXcptnsCount * 2; #endif // DEBUG #else // !FEATURE_EH_FUNCLETS compHndBBtabAllocCount = info.compXcptnsCount; #endif // !FEATURE_EH_FUNCLETS compHndBBtab = new (this, CMK_BasicBlock) EHblkDsc[compHndBBtabAllocCount]; compHndBBtabCount = info.compXcptnsCount; } /***************************************************************************** * * Remove a single exception table entry. Note that this changes the size of * the exception table. 
If calling this within a loop over the exception table * be careful to iterate again on the current entry (if XTnum) to not skip any. */ void Compiler::fgRemoveEHTableEntry(unsigned XTnum) { assert(compHndBBtabCount > 0); assert(XTnum < compHndBBtabCount); EHblkDsc* HBtab; /* Reduce the number of entries in the EH table by one */ compHndBBtabCount--; if (compHndBBtabCount == 0) { // No more entries remaining. // // We used to null out compHndBBtab here, but with OSR + Synch method // we may remove all the initial EH entries if not reachable in the // OSR portion, then need to add one for the synchronous exit. // // So now we just leave it be. } else { /* If we recorded an enclosing index for xtab then see * if it needs to be updated due to the removal of this entry */ HBtab = compHndBBtab + XTnum; for (EHblkDsc* const xtab : EHClauses(this)) { if ((xtab != HBtab) && (xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) && (xtab->ebdEnclosingTryIndex >= XTnum)) { // Update the enclosing scope link if (xtab->ebdEnclosingTryIndex == XTnum) { xtab->ebdEnclosingTryIndex = HBtab->ebdEnclosingTryIndex; } if ((xtab->ebdEnclosingTryIndex > XTnum) && (xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX)) { xtab->ebdEnclosingTryIndex--; } } if ((xtab != HBtab) && (xtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) && (xtab->ebdEnclosingHndIndex >= XTnum)) { // Update the enclosing scope link if (xtab->ebdEnclosingHndIndex == XTnum) { xtab->ebdEnclosingHndIndex = HBtab->ebdEnclosingHndIndex; } if ((xtab->ebdEnclosingHndIndex > XTnum) && (xtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX)) { xtab->ebdEnclosingHndIndex--; } } } /* We need to update all of the blocks' bbTryIndex */ for (BasicBlock* const blk : Blocks()) { if (blk->hasTryIndex()) { if (blk->getTryIndex() == XTnum) { noway_assert(blk->bbFlags & BBF_REMOVED); INDEBUG(blk->setTryIndex(MAX_XCPTN_INDEX);) // Note: this is still a legal index, just unlikely } else if (blk->getTryIndex() > XTnum) { blk->setTryIndex(blk->getTryIndex() - 1); } } if (blk->hasHndIndex()) { if (blk->getHndIndex() == XTnum) { noway_assert(blk->bbFlags & BBF_REMOVED); INDEBUG(blk->setHndIndex(MAX_XCPTN_INDEX);) // Note: this is still a legal index, just unlikely } else if (blk->getHndIndex() > XTnum) { blk->setHndIndex(blk->getHndIndex() - 1); } } } /* Now remove the unused entry from the table */ if (XTnum < compHndBBtabCount) { /* We copy over the old entry */ memmove(HBtab, HBtab + 1, (compHndBBtabCount - XTnum) * sizeof(*HBtab)); } else { /* Last entry. Don't need to do anything */ noway_assert(XTnum == compHndBBtabCount); } } } #if defined(FEATURE_EH_FUNCLETS) /***************************************************************************** * * Add a single exception table entry at index 'XTnum', [0 <= XTnum <= compHndBBtabCount]. * If 'XTnum' is compHndBBtabCount, then add the entry at the end. * Note that this changes the size of the exception table. * All the blocks referring to the various index values are updated. * The table entry itself is not filled in. * Returns a pointer to the new entry. 
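 *
 * Example (illustrative): inserting at XTnum == 2 into a table of 4 entries shifts the old entries
 * 2 and 3 up to indices 3 and 4; every ebdEnclosingTryIndex/ebdEnclosingHndIndex link and every
 * block try/handler index that referred to an entry at index >= 2 is incremented so it keeps
 * referring to the same (moved) entry.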
*/ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum) { if (XTnum != compHndBBtabCount) { // Update all enclosing links that will get invalidated by inserting an entry at 'XTnum' for (EHblkDsc* const xtab : EHClauses(this)) { if ((xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) && (xtab->ebdEnclosingTryIndex >= XTnum)) { // Update the enclosing scope link xtab->ebdEnclosingTryIndex++; } if ((xtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) && (xtab->ebdEnclosingHndIndex >= XTnum)) { // Update the enclosing scope link xtab->ebdEnclosingHndIndex++; } } // We need to update the BasicBlock bbTryIndex and bbHndIndex field for all blocks for (BasicBlock* const blk : Blocks()) { if (blk->hasTryIndex() && (blk->getTryIndex() >= XTnum)) { blk->setTryIndex(blk->getTryIndex() + 1); } if (blk->hasHndIndex() && (blk->getHndIndex() >= XTnum)) { blk->setHndIndex(blk->getHndIndex() + 1); } } } // Increase the number of entries in the EH table by one if (compHndBBtabCount == compHndBBtabAllocCount) { // We need to reallocate the table if (compHndBBtabAllocCount == MAX_XCPTN_INDEX) { // We're already at the max size for indices to be unsigned short IMPL_LIMITATION("too many exception clauses"); } // Double the table size. For stress, we could use +1. Note that if the table isn't allocated // yet, such as when we add an EH region for synchronized methods that don't already have one, // we start at zero, so we need to make sure the new table has at least one entry. unsigned newHndBBtabAllocCount = max(1, compHndBBtabAllocCount * 2); noway_assert(compHndBBtabAllocCount < newHndBBtabAllocCount); // check for overflow if (newHndBBtabAllocCount > MAX_XCPTN_INDEX) { newHndBBtabAllocCount = MAX_XCPTN_INDEX; // increase to the maximum size we allow } JITDUMP("*********** fgAddEHTableEntry: increasing EH table size from %d to %d\n", compHndBBtabAllocCount, newHndBBtabAllocCount); compHndBBtabAllocCount = newHndBBtabAllocCount; EHblkDsc* newTable = new (this, CMK_BasicBlock) EHblkDsc[compHndBBtabAllocCount]; // Move over the stuff before the new entry memcpy_s(newTable, compHndBBtabAllocCount * sizeof(*compHndBBtab), compHndBBtab, XTnum * sizeof(*compHndBBtab)); if (XTnum != compHndBBtabCount) { // Move over the stuff after the new entry memcpy_s(newTable + XTnum + 1, (compHndBBtabAllocCount - XTnum - 1) * sizeof(*compHndBBtab), compHndBBtab + XTnum, (compHndBBtabCount - XTnum) * sizeof(*compHndBBtab)); } // Now set the new table as the table to use. The old one gets lost, but we can't // free it because we don't have a freeing allocator. compHndBBtab = newTable; } else if (XTnum != compHndBBtabCount) { // Leave the elements before the new element alone. Move the ones after it, to make space. EHblkDsc* HBtab = compHndBBtab + XTnum; memmove_s(HBtab + 1, (compHndBBtabAllocCount - XTnum - 1) * sizeof(*compHndBBtab), HBtab, (compHndBBtabCount - XTnum) * sizeof(*compHndBBtab)); } // Now the entry is there, but not filled in compHndBBtabCount++; return compHndBBtab + XTnum; } #endif // FEATURE_EH_FUNCLETS #if !FEATURE_EH /***************************************************************************** * fgRemoveEH: To facilitate the bring-up of new platforms without having to * worry about fully implementing EH, we want to simply remove EH constructs * from the IR. This works because a large percentage of our tests contain * EH constructs but don't actually throw exceptions. This function removes * 'catch', 'filter', 'filter-handler', and 'fault' clauses completely. 
 * It requires that the importer has created the EH table, and that normal
 * EH well-formedness tests have been done, and 'leave' opcodes have been
 * imported.
 *
 * It currently does not handle 'finally' clauses, so tests that include
 * 'finally' will NYI(). To handle 'finally', we would need to inline the
 * 'finally' clause IL at each exit from a finally-protected 'try', or
 * else call the 'finally' clause, like normal.
 *
 * Walk the EH table from beginning to end. If a table entry is nested within
 * a handler, we skip it, as we'll delete its code when we get to the enclosing
 * handler. If a clause is enclosed within a 'try', or has no nesting, then we delete
 * it (and its range of code blocks). We don't need to worry about cleaning up
 * the EH table entries as we remove the individual handlers (such as calling
 * fgRemoveEHTableEntry()), as we'll null out the entire table at the end.
 *
 * This function assumes FEATURE_EH_FUNCLETS is defined.
 */
void Compiler::fgRemoveEH()
{
#ifdef DEBUG
    if (verbose)
        printf("\n*************** In fgRemoveEH()\n");
#endif // DEBUG

    if (compHndBBtabCount == 0)
    {
        JITDUMP("No EH to remove\n\n");
        return;
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\n*************** Before fgRemoveEH()\n");
        fgDispBasicBlocks();
        fgDispHandlerTab();
        printf("\n");
    }
#endif // DEBUG

    // Make sure we're early in compilation, so we don't need to update lots of data structures.
    assert(!fgComputePredsDone);
    assert(!fgDomsComputed);
    assert(!fgFuncletsCreated);
    assert(fgFirstFuncletBB == nullptr); // this should follow from "!fgFuncletsCreated"
    assert(!optLoopsMarked);

    unsigned  XTnum;
    EHblkDsc* HBtab;

    for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
    {
        if (HBtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX)
        {
            // This entry is nested within some other handler. So, don't delete the
            // EH entry here; let the enclosing handler delete it. Note that for this
            // EH entry, both the 'try' and handler portions are fully nested within
            // the enclosing handler region, due to proper nesting rules.
            continue;
        }

        if (HBtab->HasCatchHandler() || HBtab->HasFilter() || HBtab->HasFaultHandler())
        {
            // Remove all the blocks associated with the handler. Note that there is no
            // fall-through into the handler, or fall-through out of the handler, so
            // just deleting the blocks is sufficient. Note, however, that for every
            // BBJ_EHCATCHRET we delete, we need to fix up the reference count of the
            // block it points to (by subtracting one from its reference count).
            // Note that the blocks for a filter immediately precede the blocks for its associated filter-handler.

            BasicBlock* blkBeg  = HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg;
            BasicBlock* blkLast = HBtab->ebdHndLast;

            // Splice out the range of blocks from blkBeg to blkLast (inclusive).
            fgUnlinkRange(blkBeg, blkLast);

            BasicBlock* blk;

            // Walk the unlinked blocks and mark them as having been removed.
            for (blk = blkBeg; blk != blkLast->bbNext; blk = blk->bbNext)
            {
                blk->bbFlags |= BBF_REMOVED;

                if (blk->bbJumpKind == BBJ_EHCATCHRET)
                {
                    assert(blk->bbJumpDest->bbRefs > 0);
                    blk->bbJumpDest->bbRefs -= 1;
                }
            }

            // Walk the blocks of the 'try' and clear data that makes them appear to be within a 'try'.
            for (blk = HBtab->ebdTryBeg; blk != HBtab->ebdTryLast->bbNext; blk = blk->bbNext)
            {
                blk->clearTryIndex();
                blk->bbFlags &= ~BBF_TRY_BEG;
            }

            // If we are deleting a range of blocks whose last block is
            // the 'last' block of an enclosing try/hnd region, we need to
            // fix up the EH table.
We only care about less nested // EH table entries, since we've already deleted everything up to XTnum. unsigned XTnum2; EHblkDsc* HBtab2; for (XTnum2 = XTnum + 1, HBtab2 = compHndBBtab + XTnum2; XTnum2 < compHndBBtabCount; XTnum2++, HBtab2++) { // Handle case where deleted range is at the end of a 'try'. if (HBtab2->ebdTryLast == blkLast) { fgSetTryEnd(HBtab2, blkBeg->bbPrev); } // Handle case where deleted range is at the end of a handler. // (This shouldn't happen, though, because we don't delete handlers // nested within other handlers; we wait until we get to the // enclosing handler.) if (HBtab2->ebdHndLast == blkLast) { unreached(); } } } else { // It must be a 'finally'. We still need to call the finally. Note that the // 'finally' can be "called" from multiple locations (e.g., the 'try' block // can have multiple 'leave' instructions, each leaving to different targets, // and each going through the 'finally'). We could inline the 'finally' at each // LEAVE site within a 'try'. If the 'try' exits at all (that is, no infinite loop), // there will be at least one since there is no "fall through" at the end of // the 'try'. assert(HBtab->HasFinallyHandler()); NYI("remove finally blocks"); } } /* end of the for loop over XTnum */ #ifdef DEBUG // Make sure none of the remaining blocks have any EH. for (BasicBlock* const blk : Blocks()) { assert(!blk->hasTryIndex()); assert(!blk->hasHndIndex()); assert((blk->bbFlags & BBF_TRY_BEG) == 0); assert((blk->bbFlags & BBF_FUNCLET_BEG) == 0); assert((blk->bbFlags & BBF_REMOVED) == 0); assert(blk->bbCatchTyp == BBCT_NONE); } #endif // DEBUG // Delete the EH table compHndBBtab = nullptr; compHndBBtabCount = 0; // Leave compHndBBtabAllocCount alone. // Renumber the basic blocks JITDUMP("\nRenumbering the basic blocks for fgRemoveEH\n"); fgRenumberBlocks(); #ifdef DEBUG if (verbose) { printf("\n*************** After fgRemoveEH()\n"); fgDispBasicBlocks(); fgDispHandlerTab(); printf("\n"); } #endif } #endif // !FEATURE_EH /***************************************************************************** * * Sort the EH table if necessary. */ void Compiler::fgSortEHTable() { if (!fgNeedToSortEHTable) { return; } // Now, all fields of the EH table are set except for those that are related // to nesting. We need to first sort the table to ensure that an EH clause // appears before any try or handler that it is nested within. The CLI spec // requires this for nesting in 'try' clauses, but does not require this // for handler clauses. However, parts of the JIT do assume this ordering. // // For example: // // try { // A // } catch { // try { // B // } catch { // } // } // // In this case, the EH clauses for A and B have no required ordering: the // clause for either A or B can come first, despite B being nested within // the catch clause for A. // // The CLI spec, section 12.4.2.5 "Overview of exception handling", states: // "The ordering of the exception clauses in the Exception Handler Table is // important. If handlers are nested, the most deeply nested try blocks shall // come before the try blocks that enclose them." // // Note, in particular, that it doesn't say "shall come before the *handler* // blocks that enclose them". // // Also, the same section states, "When an exception occurs, the CLI searches // the array for the first protected block that (1) Protects a region including the // current instruction pointer and (2) Is a catch handler block and (3) Whose // filter wishes to handle the exception." 
// // Once again, nothing about the ordering of the catch blocks. // // A more complicated example: // // try { // A // } catch { // try { // B // try { // C // } catch { // } // } catch { // } // } // // The clause for C must come before the clause for B, but the clause for A can // be anywhere. Thus, we could have these orderings: ACB, CAB, CBA. // // One more example: // // try { // A // } catch { // try { // B // } catch { // try { // C // } catch { // } // } // } // // There is no ordering requirement: the EH clauses can come in any order. // // In Dev11 (Visual Studio 2012), x86 did not sort the EH table (it never had before) // but ARM did. It turns out not sorting the table can cause the EH table to incorrectly // set the bbHndIndex value in some nested cases, and that can lead to a security exploit // that allows the execution of arbitrary code. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("fgSortEHTable: Sorting EH table\n"); } #endif // DEBUG EHblkDsc* xtab1; EHblkDsc* xtab2; unsigned xtabnum1, xtabnum2; for (xtabnum1 = 0, xtab1 = compHndBBtab; xtabnum1 < compHndBBtabCount; xtabnum1++, xtab1++) { for (xtabnum2 = xtabnum1 + 1, xtab2 = xtab1 + 1; xtabnum2 < compHndBBtabCount; xtabnum2++, xtab2++) { // If the nesting is wrong, swap them. The nesting is wrong if // EH region 2 is nested in the try, handler, or filter of EH region 1. // Note that due to proper nesting rules, if any of 2 is nested in // the try or handler or filter of 1, then all of 2 is nested. // We must be careful when comparing the offsets of the 'try' clause, because // for "mutually-protect" try/catch, the 'try' bodies will be identical. // For this reason, we use the handler region to check nesting. Note // that we must check both beginning and end: a nested region can have a 'try' // body that starts at the beginning of a handler. Thus, if we just compared the // handler begin offset, we might get confused and think it is nested. IL_OFFSET hndBegOff = xtab2->ebdHndBegOffset; IL_OFFSET hndEndOff = xtab2->ebdHndEndOffset; assert(hndEndOff > hndBegOff); if ((hndBegOff >= xtab1->ebdTryBegOffset && hndEndOff <= xtab1->ebdTryEndOffset) || (hndBegOff >= xtab1->ebdHndBegOffset && hndEndOff <= xtab1->ebdHndEndOffset) || (xtab1->HasFilter() && (hndBegOff >= xtab1->ebdFilterBegOffset && hndEndOff <= xtab1->ebdHndBegOffset)) // Note that end of filter is beginning of handler ) { #ifdef DEBUG if (verbose) { printf("fgSortEHTable: Swapping out-of-order EH#%u and EH#%u\n", xtabnum1, xtabnum2); } // Assert that the 'try' region is also nested in the same place as the handler IL_OFFSET tryBegOff = xtab2->ebdTryBegOffset; IL_OFFSET tryEndOff = xtab2->ebdTryEndOffset; assert(tryEndOff > tryBegOff); if (hndBegOff >= xtab1->ebdTryBegOffset && hndEndOff <= xtab1->ebdTryEndOffset) { assert(tryBegOff >= xtab1->ebdTryBegOffset && tryEndOff <= xtab1->ebdTryEndOffset); } if (hndBegOff >= xtab1->ebdHndBegOffset && hndEndOff <= xtab1->ebdHndEndOffset) { assert(tryBegOff >= xtab1->ebdHndBegOffset && tryEndOff <= xtab1->ebdHndEndOffset); } if (xtab1->HasFilter() && (hndBegOff >= xtab1->ebdFilterBegOffset && hndEndOff <= xtab1->ebdHndBegOffset)) { assert(tryBegOff >= xtab1->ebdFilterBegOffset && tryEndOff <= xtab1->ebdHndBegOffset); } #endif // DEBUG // Swap them! EHblkDsc tmp = *xtab1; *xtab1 = *xtab2; *xtab2 = tmp; } } } } // fgNormalizeEH: Enforce the following invariants: // // 1. No block is both the first block of a handler and the first block of a try. 
In IL (and on entry // to this function), this can happen if the "try" is more nested than the handler. // // For example, consider: // // try1 ----------------- BB01 // | BB02 // |--------------------- BB03 // handler1 // |----- try2 ---------- BB04 // | | BB05 // | handler2 ------ BB06 // | | BB07 // | --------------- BB08 // |--------------------- BB09 // // Thus, the start of handler1 and the start of try2 are the same block. We will transform this to: // // try1 ----------------- BB01 // | BB02 // |--------------------- BB03 // handler1 ------------- BB10 // empty block // | try2 ---------- BB04 // | | BB05 // | handler2 ------ BB06 // | | BB07 // | --------------- BB08 // |--------------------- BB09 // // 2. No block is the first block of more than one try or handler region. // (Note that filters cannot have EH constructs nested within them, so there can be no nested try or // handler that shares the filter begin or last block. For try/filter/filter-handler constructs nested // within a try or handler region, note that the filter block cannot be the first block of the try, // nor can it be the first block of the handler, since you can't "fall into" a filter, which that situation // would require.) // // For example, we will transform this: // // try3 try2 try1 // |--- |--- |--- BB01 // | | | BB02 // | | |--- BB03 // | | BB04 // | |------------ BB05 // | BB06 // |------------------- BB07 // // to this: // // try3 ------------- BB08 // empty BBJ_NONE block // | try2 ------ BB09 // empty BBJ_NONE block // | | try1 // | | |--- BB01 // | | | BB02 // | | |--- BB03 // | | BB04 // | |------------ BB05 // | BB06 // |------------------- BB07 // // The benefit of this is that adding a block to an EH region will not require examining every EH region, // looking for possible shared "first" blocks to adjust. It also makes it easier to put code at the top // of a particular EH region, especially for loop optimizations. // // These empty blocks (BB08, BB09) will generate no code (unless some code is subsequently placed into them), // and will have the same native code offset as BB01 after code is generated. There may be labels generated // for them, if they are branch targets, so it is possible to have multiple labels targeting the same native // code offset. The blocks will not be merged with the blocks they are split from, because they will have a // different EH region, and we don't merge blocks from two different EH regions. // // In the example, if there are branches to BB01, we need to distribute them to BB01, BB08, or BB09, appropriately. // 1. A branch from BB01/BB02/BB03 to BB01 will still go to BB01. Branching to BB09 or BB08 would not be legal, // since it would branch out of a try region. // 2. A branch from BB04/BB05 to BB01 will instead branch to BB09. Branching to BB08 would not be legal. Note // that branching to BB01 would still be legal, so we have a choice. It makes the most sense to branch to BB09, // so the source and target of a branch are in the same EH region. // 3. Similarly, a branch from BB06/BB07 to BB01 will go to BB08, even though branching to BB09 would be legal. // 4. A branch from outside this loop (at the top-level) to BB01 will go to BB08. This is one case where the // source and target of the branch are not in the same EH region. // // The EH nesting rules for IL branches are described in the ECMA spec section 12.4.2.8.2.7 "Branches" and // section 12.4.2.8.2.9 "Examples". 
// // There is one exception to this normalization rule: we do not change "mutually protect" regions. These are cases // where two EH table entries have exactly the same 'try' region, used to implement C# "try / catch / catch". // The first handler appears by our nesting to be an "inner" handler, with ebdEnclosingTryIndex pointing to the // second one. It is not true nesting, though, since they both protect the same "try". Both the these EH table // entries must keep the same "try" region begin/last block pointers. A block in this "try" region has a try index // of the first ("most nested") EH table entry. // // 3. No block is the last block of more than one try or handler region. Again, as described above, // filters need not be considered. // // For example, we will transform this: // // try3 ----------------- BB01 // | try2 ---------- BB02 // | | handler1 BB03 // | | | BB04 // |----- |----- |------- BB05 // // (where all three try regions end at BB05) to this: // // try3 ----------------- BB01 // | try2 ---------- BB02 // | | handler1 BB03 // | | | BB04 // | | |------- BB05 // | |-------------- BB06 // empty BBJ_NONE block // |--------------------- BB07 // empty BBJ_NONE block // // No branches need to change: if something branched to BB05, it will still branch to BB05. If BB05 is a // BBJ_NONE block, then control flow will fall through the newly added blocks as well. If it is anything // else, it will retain that block branch type and BB06 and BB07 will be unreachable. // // The benefit of this is, once again, to remove the need to consider every EH region when adding new blocks. // // Overall, a block can appear in the EH table exactly once: as the begin or last block of a single try, filter, or // handler. There is one exception: for a single-block EH region, the block can appear as both the "begin" and "last" // block of the try, or the "begin" and "last" block of the handler (note that filters don't have a "last" block stored, // so this case doesn't apply.) // (Note: we could remove this special case if we wanted, and if it helps anything, but it doesn't appear that it will // help.) // // These invariants simplify a number of things. When inserting a new block into a region, it is not necessary to // traverse the entire EH table looking to see if any EH region needs to be updated. You only ever need to update a // single region (except for mutually-protect "try" regions). // // Also, for example, when we're trying to determine the successors of a block B1 that leads into a try T1, if a block // B2 violates invariant #3 by being the first block of both the handler of T1, and an enclosed try T2, inserting a // block to enforce this invariant prevents us from having to consider the first block of T2's handler as a possible // successor of B1. This is somewhat akin to breaking of "critical edges" in a flowgraph. void Compiler::fgNormalizeEH() { if (compHndBBtabCount == 0) { // No EH? Nothing to do. INDEBUG(fgNormalizeEHDone = true;) return; } #ifdef DEBUG if (verbose) { printf("*************** In fgNormalizeEH()\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif bool modified = false; // Case #1: Prevent the first block of a handler from also being the first block of a 'try'. if (fgNormalizeEHCase1()) { modified = true; } // Case #2: Prevent any two EH regions from starting with the same block (after case #3, we only need to worry about // 'try' blocks). if (fgNormalizeEHCase2()) { modified = true; } #if 0 // Case 3 normalization is disabled. 
The JIT really doesn't like having extra empty blocks around, especially // blocks that are unreachable. There are lots of asserts when such things occur. We will re-evaluate whether we // can do this normalization. // Note: there are cases in fgVerifyHandlerTab() that are also disabled to match this. // Case #3: Prevent any two EH regions from ending with the same block. if (fgNormalizeEHCase3()) { modified = true; } #endif // 0 INDEBUG(fgNormalizeEHDone = true;) if (modified) { // If we computed the cheap preds, don't let them leak out, in case other code doesn't maintain them properly. if (fgCheapPredsValid) { fgRemovePreds(); } JITDUMP("Added at least one basic block in fgNormalizeEH.\n"); fgRenumberBlocks(); #ifdef DEBUG // fgRenumberBlocks() will dump all the blocks and the handler table, so we don't need to do it here. fgVerifyHandlerTab(); #endif } else { JITDUMP("No EH normalization performed.\n"); } } bool Compiler::fgNormalizeEHCase1() { bool modified = false; // // Case #1: Is the first block of a handler also the first block of any try? // // Do this as a separate loop from case #2 to simplify the logic for cases where we have both multiple identical // 'try' begin blocks as well as this case, e.g.: // try { // } finally { try { try { // } catch {} // } catch {} // } // where the finally/try/try are all the same block. // We also do this before case #2, so when we get to case #2, we only need to worry about updating 'try' begin // blocks (and only those within the 'try' region's parents), not handler begin blocks, when we are inserting new // header blocks. // for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++) { EHblkDsc* eh = ehGetDsc(XTnum); BasicBlock* handlerStart = eh->ebdHndBeg; EHblkDsc* handlerStartContainingTry = ehGetBlockTryDsc(handlerStart); // If the handler start block is in a try, and is in fact the first block of that try... if (handlerStartContainingTry != nullptr && handlerStartContainingTry->ebdTryBeg == handlerStart) { // ...then we want to insert an empty, non-removable block outside the try to be the new first block of the // handler. BasicBlock* newHndStart = bbNewBasicBlock(BBJ_NONE); fgInsertBBbefore(eh->ebdHndBeg, newHndStart); #ifdef DEBUG if (verbose) { printf("Handler begin for EH#%02u and 'try' begin for EH%02u are the same block; inserted new " FMT_BB " " "before " FMT_BB " as new handler begin for EH#%u.\n", XTnum, ehGetIndex(handlerStartContainingTry), newHndStart->bbNum, eh->ebdHndBeg->bbNum, XTnum); } #endif // DEBUG // The new block is the new handler begin. eh->ebdHndBeg = newHndStart; // Try index is the same as the enclosing try, if any, of eh: if (eh->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { newHndStart->clearTryIndex(); } else { newHndStart->setTryIndex(eh->ebdEnclosingTryIndex); } newHndStart->setHndIndex(XTnum); newHndStart->bbCatchTyp = handlerStart->bbCatchTyp; handlerStart->bbCatchTyp = BBCT_NONE; // Now handlerStart is no longer the start of a handler... newHndStart->bbCodeOffs = handlerStart->bbCodeOffs; newHndStart->bbCodeOffsEnd = newHndStart->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead? 
newHndStart->inheritWeight(handlerStart); newHndStart->bbFlags |= (BBF_DONT_REMOVE | BBF_INTERNAL); modified = true; #ifdef DEBUG if (0 && verbose) // Normally this is way too verbose, but it is useful for debugging { printf("*************** fgNormalizeEH() made a change\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } } return modified; } bool Compiler::fgNormalizeEHCase2() { bool modified = false; // // Case #2: Make sure no two 'try' have the same begin block (except for mutually-protect regions). // Note that this can only happen for nested 'try' regions, so we only need to look through the // 'try' nesting hierarchy. // for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++) { EHblkDsc* eh = ehGetDsc(XTnum); if (eh->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) { BasicBlock* tryStart = eh->ebdTryBeg; BasicBlock* insertBeforeBlk = tryStart; // If we need to insert new blocks, we insert before this block. // We need to keep track of the last "mutually protect" region so we can properly not add additional header // blocks to the second and subsequent mutually protect try blocks. We can't just keep track of the EH // region pointer, because we're updating the 'try' begin blocks as we go. So, we need to keep track of the // pre-update 'try' begin/last blocks themselves. BasicBlock* mutualTryBeg = eh->ebdTryBeg; BasicBlock* mutualTryLast = eh->ebdTryLast; unsigned mutualProtectIndex = XTnum; EHblkDsc* ehOuter = eh; do { unsigned ehOuterTryIndex = ehOuter->ebdEnclosingTryIndex; ehOuter = ehGetDsc(ehOuterTryIndex); BasicBlock* outerTryStart = ehOuter->ebdTryBeg; if (outerTryStart == tryStart) { // We found two EH regions with the same 'try' begin! Should we do something about it? if (ehOuter->ebdIsSameTry(mutualTryBeg, mutualTryLast)) { // clang-format off // Don't touch mutually-protect regions: their 'try' regions must remain identical! // We want to continue the looping outwards, in case we have something like this: // // try3 try2 try1 // |--- |---- |---- BB01 // | | | BB02 // | |---- |---- BB03 // | BB04 // |------------------- BB05 // // (Thus, try1 & try2 are mutually-protect 'try' regions from BB01 to BB03. They are nested inside try3, // which also starts at BB01. The 'catch' clauses have been elided.) // In this case, we'll decline to add a new header block for try2, but we will add a new one for try3, ending with: // // try3 try2 try1 // |------------------- BB06 // | |---- |---- BB01 // | | | BB02 // | |---- |---- BB03 // | BB04 // |------------------- BB05 // // More complicated (yes, this is real): // // try { // try { // try { // try { // try { // try { // try { // try { // } // catch {} // mutually-protect set #1 // catch {} // } finally {} // } // catch {} // mutually-protect set #2 // catch {} // catch {} // } finally {} // } catch {} // } finally {} // } catch {} // } finally {} // // In this case, all the 'try' start at the same block! Note that there are two sets of mutually-protect regions, // separated by some nesting. // clang-format on #ifdef DEBUG if (verbose) { printf("Mutually protect regions EH#%u and EH#%u; leaving identical 'try' begin blocks.\n", mutualProtectIndex, ehGetIndex(ehOuter)); } #endif // DEBUG // We still need to update the tryBeg, if something more nested already did that. ehOuter->ebdTryBeg = insertBeforeBlk; } else { // We're in a new set of mutual protect regions, so don't compare against the original. 
mutualTryBeg = ehOuter->ebdTryBeg; mutualTryLast = ehOuter->ebdTryLast; mutualProtectIndex = ehOuterTryIndex; // We're going to need the preds. We compute them here, before inserting the new block, // so our logic to add/remove preds below is the same for both the first time preds are // created and subsequent times. if (!fgCheapPredsValid) { fgComputeCheapPreds(); } // We've got multiple 'try' blocks starting at the same place! // Add a new first 'try' block for 'ehOuter' that will be outside 'eh'. BasicBlock* newTryStart = bbNewBasicBlock(BBJ_NONE); fgInsertBBbefore(insertBeforeBlk, newTryStart); #ifdef DEBUG if (verbose) { printf("'try' begin for EH#%u and EH#%u are same block; inserted new " FMT_BB " before " FMT_BB " " "as new 'try' begin for EH#%u.\n", ehOuterTryIndex, XTnum, newTryStart->bbNum, insertBeforeBlk->bbNum, ehOuterTryIndex); } #endif // DEBUG // The new block is the new 'try' begin. ehOuter->ebdTryBeg = newTryStart; newTryStart->copyEHRegion(tryStart); // Copy the EH region info newTryStart->setTryIndex(ehOuterTryIndex); // ... but overwrite the 'try' index newTryStart->bbCatchTyp = BBCT_NONE; newTryStart->bbCodeOffs = tryStart->bbCodeOffs; newTryStart->bbCodeOffsEnd = newTryStart->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead? newTryStart->inheritWeight(tryStart); // Note that we don't need to clear any flags on the old try start, since it is still a 'try' // start. newTryStart->bbFlags |= (BBF_TRY_BEG | BBF_DONT_REMOVE | BBF_INTERNAL); // Now we need to split any flow edges targetting the old try begin block between the old // and new block. Note that if we are handling a multiply-nested 'try', we may have already // split the inner set. So we need to split again, from the most enclosing block that we've // already created, namely, insertBeforeBlk. // // For example: // // try3 try2 try1 // |---- |---- |---- BB01 // | | | BB02 // | | |---- BB03 // | |----------- BB04 // |------------------ BB05 // // We'll loop twice, to create two header blocks, one for try2, and the second time for try3 // (in that order). // After the first loop, we have: // // try3 try2 try1 // |---- BB06 // |---- | |---- BB01 // | | | BB02 // | | |---- BB03 // | |----------- BB04 // |------------------ BB05 // // And all the external edges have been changed to point at try2. On the next loop, we'll create // a unique header block for try3, and split the edges between try2 and try3, leaving us with: // // try3 try2 try1 // |---- BB07 // | |---- BB06 // | | |---- BB01 // | | | BB02 // | | |---- BB03 // | |----------- BB04 // |------------------ BB05 BasicBlockList* nextPred; // we're going to update the pred list as we go, so we need to keep // track of the next pred in case it gets deleted. for (BasicBlockList* pred = insertBeforeBlk->bbCheapPreds; pred != nullptr; pred = nextPred) { nextPred = pred->next; // Who gets this predecessor? BasicBlock* predBlock = pred->block; if (!BasicBlock::sameTryRegion(insertBeforeBlk, predBlock)) { // Move the edge to target newTryStart instead of insertBeforeBlk. fgAddCheapPred(newTryStart, predBlock); fgRemoveCheapPred(insertBeforeBlk, predBlock); // Now change the branch. If it was a BBJ_NONE fall-through to the top block, this will // do nothing. Since cheap preds contains dups (for switch duplicates), we will call // this once per dup. fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk); // Need to adjust ref counts here since we're retargeting edges. 
newTryStart->bbRefs++; assert(insertBeforeBlk->countOfInEdges() > 0); insertBeforeBlk->bbRefs--; #ifdef DEBUG if (verbose) { printf("Redirect " FMT_BB " target from " FMT_BB " to " FMT_BB ".\n", predBlock->bbNum, insertBeforeBlk->bbNum, newTryStart->bbNum); } #endif // DEBUG } } // The new block (a fall-through block) is a new predecessor. fgAddCheapPred(insertBeforeBlk, newTryStart); // We don't need to update the tryBeg block of other EH regions here because we are looping // outwards in enclosing try index order, and we'll get to them later. // Move the insert block backwards, to the one we just inserted. insertBeforeBlk = insertBeforeBlk->bbPrev; assert(insertBeforeBlk == newTryStart); modified = true; #ifdef DEBUG if (0 && verbose) // Normally this is way too verbose, but it is useful for debugging { printf("*************** fgNormalizeEH() made a change\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } } else { // If the 'try' start block in the outer block isn't the same, then none of the more-enclosing // try regions (if any) can have the same 'try' start block, so we're done. // Note that we could have a situation like this: // // try4 try3 try2 try1 // |--- |--- | | BB01 // | | | | BB02 // | | |---- |---- BB03 // | | | BB04 // | | |------------ BB05 // | | BB06 // | |------------------- BB07 // |-------------------------- BB08 // // (Thus, try1 & try2 start at BB03, and are nested inside try3 & try4, which both start at BB01.) // In this case, we'll process try1 and try2, then break out. Later, we'll get to try3 and process // it and try4. break; } } while (ehOuter->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX); } } return modified; } bool Compiler::fgNormalizeEHCase3() { bool modified = false; // // Case #3: Make sure no two 'try' or handler regions have the same 'last' block (except for mutually protect 'try' // regions). As above, there has to be EH region nesting for this to occur. However, since we need to consider // handlers, there are more cases. // // There are four cases to consider: // (1) try nested in try // (2) handler nested in try // (3) try nested in handler // (4) handler nested in handler // // Note that, before funclet generation, it would be unusual, though legal IL, for a 'try' to come at the end // of an EH region (either 'try' or handler region), since that implies that its corresponding handler precedes it. // That will never happen in C#, but is legal in IL. // // Only one of these cases can happen. For example, if we have case (2), where a try/catch is nested in a 'try' and // the nested handler has the same 'last' block as the outer handler, then, due to nesting rules, the nested 'try' // must also be within the outer handler, and obviously cannot share the same 'last' block. // for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++) { EHblkDsc* eh = ehGetDsc(XTnum); // Find the EH region 'eh' is most nested within, either 'try' or handler or none. bool outerIsTryRegion; unsigned ehOuterIndex = eh->ebdGetEnclosingRegionIndex(&outerIsTryRegion); if (ehOuterIndex != EHblkDsc::NO_ENCLOSING_INDEX) { EHblkDsc* ehInner = eh; // This gets updated as we loop outwards in the EH nesting unsigned ehInnerIndex = XTnum; // This gets updated as we loop outwards in the EH nesting bool innerIsTryRegion; EHblkDsc* ehOuter = ehGetDsc(ehOuterIndex); // Debugging: say what type of block we're updating. INDEBUG(const char* outerType = ""; const char* innerType = "";) // 'insertAfterBlk' is the place we will insert new "normalization" blocks. 
We don't know yet if we will // insert them after the innermost 'try' or handler's "last" block, so we set it to nullptr. Once we // determine the innermost region that is equivalent, we set this, and then update it incrementally as we // loop outwards. BasicBlock* insertAfterBlk = nullptr; bool foundMatchingLastBlock = false; // This is set to 'false' for mutual protect regions for which we will not insert a normalization block. bool insertNormalizationBlock = true; // Keep track of what the 'try' index and handler index should be for any new normalization block that we // insert. If we have a sequence of alternating nested 'try' and handlers with the same 'last' block, we'll // need to update these as we go. For example: // try { // EH#5 // ... // catch { // EH#4 // ... // try { // EH#3 // ... // catch { // EH#2 // ... // try { // EH#1 // BB01 // try=1, hnd=2 // } } } } } // all the 'last' blocks are the same // // after normalization: // // try { // EH#5 // ... // catch { // EH#4 // ... // try { // EH#3 // ... // catch { // EH#2 // ... // try { // EH#1 // BB01 // try=1, hnd=2 // } // BB02 // try=3, hnd=2 // } // BB03 // try=3, hnd=4 // } // BB04 // try=5, hnd=4 // } // BB05 // try=5, hnd=0 (no enclosing hnd) // } // unsigned nextTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; // Initialization only needed to quell compiler // warnings. unsigned nextHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; // We compare the outer region against the inner region's 'try' or handler, determined by the // 'outerIsTryRegion' variable. Once we decide that, we know exactly the 'last' pointer that we will use to // compare against all enclosing EH regions. // // For example, if we have these nested EH regions (omitting some corresponding try/catch clauses for each // nesting level): // // try { // ... // catch { // ... // try { // } } } // all the 'last' blocks are the same // // then we determine that the innermost region we are going to compare against is the 'try' region. There's // no reason to compare against its handler region for any enclosing region (since it couldn't possibly // share a 'last' block with the enclosing region). However, there's no harm, either (and it simplifies // the code for the first set of comparisons to be the same as subsequent, more enclosing cases). BasicBlock* lastBlockPtrToCompare = nullptr; // We need to keep track of the last "mutual protect" region so we can properly not add additional blocks // to the second and subsequent mutual protect try blocks. We can't just keep track of the EH region // pointer, because we're updating the last blocks as we go. So, we need to keep track of the // pre-update 'try' begin/last blocks themselves. These only matter if the "last" blocks that match are // from two (or more) nested 'try' regions. BasicBlock* mutualTryBeg = nullptr; BasicBlock* mutualTryLast = nullptr; if (outerIsTryRegion) { nextTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; // unused, since the outer block is a 'try' region. // The outer (enclosing) region is a 'try' if (ehOuter->ebdTryLast == ehInner->ebdTryLast) { // Case (1) try nested in try. foundMatchingLastBlock = true; INDEBUG(innerType = "try"; outerType = "try";) insertAfterBlk = ehOuter->ebdTryLast; lastBlockPtrToCompare = insertAfterBlk; if (EHblkDsc::ebdIsSameTry(ehOuter, ehInner)) { // We can't touch this 'try', since it's mutual protect. 
CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("Mutual protect regions EH#%u and EH#%u; leaving identical 'try' last blocks.\n", ehOuterIndex, ehInnerIndex); } #endif // DEBUG insertNormalizationBlock = false; } else { nextHndIndex = ehInner->ebdTryLast->hasHndIndex() ? ehInner->ebdTryLast->getHndIndex() : EHblkDsc::NO_ENCLOSING_INDEX; } } else if (ehOuter->ebdTryLast == ehInner->ebdHndLast) { // Case (2) handler nested in try. foundMatchingLastBlock = true; INDEBUG(innerType = "handler"; outerType = "try";) insertAfterBlk = ehOuter->ebdTryLast; lastBlockPtrToCompare = insertAfterBlk; assert(ehInner->ebdHndLast->getHndIndex() == ehInnerIndex); nextHndIndex = ehInner->ebdEnclosingHndIndex; } else { // No "last" pointers match! } if (foundMatchingLastBlock) { // The outer might be part of a new set of mutual protect regions (if it isn't part of one already). mutualTryBeg = ehOuter->ebdTryBeg; mutualTryLast = ehOuter->ebdTryLast; } } else { nextHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; // unused, since the outer block is a handler region. // The outer (enclosing) region is a handler (note that it can't be a filter; there is no nesting // within a filter). if (ehOuter->ebdHndLast == ehInner->ebdTryLast) { // Case (3) try nested in handler. foundMatchingLastBlock = true; INDEBUG(innerType = "try"; outerType = "handler";) insertAfterBlk = ehOuter->ebdHndLast; lastBlockPtrToCompare = insertAfterBlk; assert(ehInner->ebdTryLast->getTryIndex() == ehInnerIndex); nextTryIndex = ehInner->ebdEnclosingTryIndex; } else if (ehOuter->ebdHndLast == ehInner->ebdHndLast) { // Case (4) handler nested in handler. foundMatchingLastBlock = true; INDEBUG(innerType = "handler"; outerType = "handler";) insertAfterBlk = ehOuter->ebdHndLast; lastBlockPtrToCompare = insertAfterBlk; nextTryIndex = ehInner->ebdTryLast->hasTryIndex() ? ehInner->ebdTryLast->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX; } else { // No "last" pointers match! } } while (foundMatchingLastBlock) { assert(lastBlockPtrToCompare != nullptr); assert(insertAfterBlk != nullptr); assert(ehOuterIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(ehOuter != nullptr); // Add a normalization block if (insertNormalizationBlock) { // Add a new last block for 'ehOuter' that will be outside the EH region that it encloses and // shares a 'last' pointer with BasicBlock* newLast = bbNewBasicBlock(BBJ_NONE); assert(insertAfterBlk != nullptr); fgInsertBBafter(insertAfterBlk, newLast); #ifdef DEBUG if (verbose) { printf( "last %s block for EH#%u and last %s block for EH#%u are same block; inserted new " FMT_BB " after " FMT_BB " as new last %s block for EH#%u.\n", outerType, ehOuterIndex, innerType, ehInnerIndex, newLast->bbNum, insertAfterBlk->bbNum, outerType, ehOuterIndex); } #endif // DEBUG if (outerIsTryRegion) { ehOuter->ebdTryLast = newLast; newLast->setTryIndex(ehOuterIndex); if (nextHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) { newLast->clearHndIndex(); } else { newLast->setHndIndex(nextHndIndex); } } else { ehOuter->ebdHndLast = newLast; if (nextTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { newLast->clearTryIndex(); } else { newLast->setTryIndex(nextTryIndex); } newLast->setHndIndex(ehOuterIndex); } newLast->bbCatchTyp = BBCT_NONE; // bbCatchTyp is only set on the first block of a handler, which this block is not newLast->bbCodeOffs = insertAfterBlk->bbCodeOffsEnd; newLast->bbCodeOffsEnd = newLast->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead?
newLast->inheritWeight(insertAfterBlk); newLast->bbFlags |= BBF_INTERNAL; // The new block (a fall-through block) is a new predecessor. if (fgCheapPredsValid) { fgAddCheapPred(newLast, insertAfterBlk); } // Move the insert pointer. More enclosing equivalent 'last' blocks will be inserted after this. insertAfterBlk = newLast; modified = true; #ifdef DEBUG if (verbose) // Normally this is way too verbose, but it is useful for debugging { printf("*************** fgNormalizeEH() made a change\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } // Now find the next outer enclosing EH region and see if it also shares the last block. foundMatchingLastBlock = false; // assume nothing will match ehInner = ehOuter; ehInnerIndex = ehOuterIndex; innerIsTryRegion = outerIsTryRegion; ehOuterIndex = ehOuter->ebdGetEnclosingRegionIndex(&outerIsTryRegion); // Loop outwards in the EH nesting. if (ehOuterIndex != EHblkDsc::NO_ENCLOSING_INDEX) { // There are more enclosing regions; check for equivalent 'last' pointers. INDEBUG(innerType = outerType; outerType = "";) ehOuter = ehGetDsc(ehOuterIndex); insertNormalizationBlock = true; // assume it's not mutual protect if (outerIsTryRegion) { nextTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; // unused, since the outer block is a 'try' region. // The outer (enclosing) region is a 'try' if (ehOuter->ebdTryLast == lastBlockPtrToCompare) { // Case (1) and (2): try or handler nested in try. foundMatchingLastBlock = true; INDEBUG(outerType = "try";) if (innerIsTryRegion && ehOuter->ebdIsSameTry(mutualTryBeg, mutualTryLast)) { // We can't touch this 'try', since it's mutual protect. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("Mutual protect regions EH#%u and EH#%u; leaving identical 'try' last " "blocks.\n", ehOuterIndex, ehInnerIndex); } #endif // DEBUG insertNormalizationBlock = false; // We still need to update the 'last' pointer, in case someone inserted a normalization // block before the start of the mutual protect 'try' region. ehOuter->ebdTryLast = insertAfterBlk; } else { if (innerIsTryRegion) { // Case (1) try nested in try. nextHndIndex = ehInner->ebdTryLast->hasHndIndex() ? ehInner->ebdTryLast->getHndIndex() : EHblkDsc::NO_ENCLOSING_INDEX; } else { // Case (2) handler nested in try. assert(ehInner->ebdHndLast->getHndIndex() == ehInnerIndex); nextHndIndex = ehInner->ebdEnclosingHndIndex; } } // The outer might be part of a new set of mutual protect regions (if it isn't part of one // already). mutualTryBeg = ehOuter->ebdTryBeg; mutualTryLast = ehOuter->ebdTryLast; } } else { nextHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; // unused, since the outer block is a handler region. // The outer (enclosing) region is a handler (note that it can't be a filter; there is no // nesting within a filter). if (ehOuter->ebdHndLast == lastBlockPtrToCompare) { // Case (3) and (4): try nested in try or handler. foundMatchingLastBlock = true; INDEBUG(outerType = "handler";) if (innerIsTryRegion) { // Case (3) try nested in handler. assert(ehInner->ebdTryLast->getTryIndex() == ehInnerIndex); nextTryIndex = ehInner->ebdEnclosingTryIndex; } else { // Case (4) handler nested in handler. nextTryIndex = ehInner->ebdTryLast->hasTryIndex() ? ehInner->ebdTryLast->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX; } } } } // If we get to here and foundMatchingLastBlock is false, then the inner and outer region don't share // any 'last' blocks, so we're done. 
Note that we could have a situation like this: // // try4 try3 try2 try1 // |---- | | | BB01 // | |---- | | BB02 // | | |---- | BB03 // | | | |----- BB04 // | | |----- |----- BB05 // |---- |------------------- BB06 // // (Thus, try1 & try2 end at BB05, and are nested inside try3 & try4, which both end at BB06.) // In this case, we'll process try1 and try2, then break out. Later, as we iterate through the EH table, // we'll get to try3 and process it and try4. } // end while (foundMatchingLastBlock) } // if (ehOuterIndex != EHblkDsc::NO_ENCLOSING_INDEX) } // EH table iteration return modified; } /*****************************************************************************/ #ifdef DEBUG void Compiler::dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause) { printf("EH clause #%u:\n", num); printf(" Flags: 0x%x", clause.Flags); // Note: the flags field is kind of weird. It should be compared for equality // to determine the type of clause, even though it looks like a bitfield. In // particular, CORINFO_EH_CLAUSE_NONE is zero, so you can't use "&" to check it. const DWORD CORINFO_EH_CLAUSE_TYPE_MASK = 0x7; switch (clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK) { case CORINFO_EH_CLAUSE_NONE: printf(" (catch)"); break; case CORINFO_EH_CLAUSE_FILTER: printf(" (filter)"); break; case CORINFO_EH_CLAUSE_FINALLY: printf(" (finally)"); break; case CORINFO_EH_CLAUSE_FAULT: printf(" (fault)"); break; default: printf(" (UNKNOWN type %u!)", clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK); break; } if (clause.Flags & ~CORINFO_EH_CLAUSE_TYPE_MASK) { printf(" (extra unknown bits: 0x%x)", clause.Flags & ~CORINFO_EH_CLAUSE_TYPE_MASK); } printf("\n"); printf(" TryOffset: 0x%x\n", clause.TryOffset); printf(" TryLength: 0x%x\n", clause.TryLength); printf(" HandlerOffset: 0x%x\n", clause.HandlerOffset); printf(" HandlerLength: 0x%x\n", clause.HandlerLength); if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { printf(" FilterOffset: 0x%x\n", clause.FilterOffset); } else { printf(" ClassToken: 0x%x\n", clause.ClassToken); } } void Compiler::dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause) { if (opts.dspDiffable) { /* (( brace matching editor workaround to compensate for the following line */ printf("EH#%u: try [%s..%s) handled by [%s..%s) ", num, GetEmitter()->emitOffsetToLabel(clause.TryOffset), GetEmitter()->emitOffsetToLabel(clause.TryLength), GetEmitter()->emitOffsetToLabel(clause.HandlerOffset), GetEmitter()->emitOffsetToLabel(clause.HandlerLength)); } else { /* (( brace matching editor workaround to compensate for the following line */ printf("EH#%u: try [%04X..%04X) handled by [%04X..%04X) ", num, dspOffset(clause.TryOffset), dspOffset(clause.TryLength), dspOffset(clause.HandlerOffset), dspOffset(clause.HandlerLength)); } // Note: the flags field is kind of weird. It should be compared for equality // to determine the type of clause, even though it looks like a bitfield. In // particular, CORINFO_EH_CLAUSE_NONE is zero, so you can't use "&" to check it. // You do need to mask off the bits, though, because CORINFO_EH_CLAUSE_DUPLICATE // is and'ed in.
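// For example (an illustrative note, not part of the original comment): a duplicated finally clause has
// Flags == (CORINFO_EH_CLAUSE_FINALLY | CORINFO_EH_CLAUSE_DUPLICATE), so an unmasked equality test against
// CORINFO_EH_CLAUSE_FINALLY would fail for it, while '(clause.Flags & CORINFO_EH_CLAUSE_NONE)' is always zero
// and so can never identify a catch clause.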
const DWORD CORINFO_EH_CLAUSE_TYPE_MASK = 0x7; switch (clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK) { case CORINFO_EH_CLAUSE_NONE: printf("(class: %04X)", clause.ClassToken); break; case CORINFO_EH_CLAUSE_FILTER: if (opts.dspDiffable) { /* ( brace matching editor workaround to compensate for the following line */ printf("filter at [%s..%s)", GetEmitter()->emitOffsetToLabel(clause.ClassToken), GetEmitter()->emitOffsetToLabel(clause.HandlerOffset)); } else { /* ( brace matching editor workaround to compensate for the following line */ printf("filter at [%04X..%04X)", dspOffset(clause.ClassToken), dspOffset(clause.HandlerOffset)); } break; case CORINFO_EH_CLAUSE_FINALLY: printf("(finally)"); break; case CORINFO_EH_CLAUSE_FAULT: printf("(fault)"); break; default: printf("(UNKNOWN type %u!)", clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK); assert(!"unknown type"); break; } if ((clause.TryOffset == clause.TryLength) && (clause.TryOffset == clause.HandlerOffset) && ((clause.Flags & (CORINFO_EH_CLAUSE_DUPLICATE | CORINFO_EH_CLAUSE_FINALLY)) == (CORINFO_EH_CLAUSE_DUPLICATE | CORINFO_EH_CLAUSE_FINALLY))) { printf(" cloned finally"); } else if (clause.Flags & CORINFO_EH_CLAUSE_DUPLICATE) { printf(" duplicated"); } else if (clause.Flags & CORINFO_EH_CLAUSE_SAMETRY) { printf(" same try"); } printf("\n"); } /*****************************************************************************/ void Compiler::fgVerifyHandlerTab() { if (compIsForInlining()) { // We don't inline functions with EH. Don't bother verifying the EH table in the inlinee Compiler. return; } if (compHndBBtabCount == 0) { return; } // Did we do the normalization that prevents the first block of a handler from being a 'try' block (case 1)? bool handlerBegIsTryBegNormalizationDone = fgNormalizeEHDone; // Did we do the normalization that prevents multiple EH regions (namely, 'try' blocks) from starting on the same // block (case 2)? bool multipleBegBlockNormalizationDone = fgNormalizeEHDone; // Did we do the normalization that prevents multiple EH regions ('try' or handler blocks) from ending on the same // block (case 3)? bool multipleLastBlockNormalizationDone = false; // Currently disabled assert(compHndBBtabCount <= compHndBBtabAllocCount); unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { assert(HBtab->ebdTryBeg != nullptr); assert(HBtab->ebdTryLast != nullptr); assert(HBtab->ebdHndBeg != nullptr); assert(HBtab->ebdHndLast != nullptr); assert(HBtab->ebdTryBeg->bbFlags & BBF_TRY_BEG); assert(HBtab->ebdTryBeg->bbFlags & BBF_DONT_REMOVE); assert(HBtab->ebdHndBeg->bbFlags & BBF_DONT_REMOVE); assert((HBtab->ebdTryBeg->bbFlags & BBF_REMOVED) == 0); assert((HBtab->ebdTryLast->bbFlags & BBF_REMOVED) == 0); assert((HBtab->ebdHndBeg->bbFlags & BBF_REMOVED) == 0); assert((HBtab->ebdHndLast->bbFlags & BBF_REMOVED) == 0); if (HBtab->HasFilter()) { assert(HBtab->ebdFilter != nullptr); assert(HBtab->ebdFilter->bbFlags & BBF_DONT_REMOVE); assert((HBtab->ebdFilter->bbFlags & BBF_REMOVED) == 0); } #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { assert(HBtab->ebdHndBeg->bbFlags & BBF_FUNCLET_BEG); if (HBtab->HasFilter()) { assert(HBtab->ebdFilter->bbFlags & BBF_FUNCLET_BEG); } } #endif // FEATURE_EH_FUNCLETS } // I want to assert things about the relative ordering of blocks in the block list using // block number, but I don't want to renumber the basic blocks, which might cause a difference // between debug and non-debug code paths. 
So, create a renumbered block mapping: map the // existing block number to a renumbered block number that is ordered by block list order. unsigned bbNumMax = impInlineRoot()->fgBBNumMax; // blockNumMap[old block number] => new block number size_t blockNumBytes = (bbNumMax + 1) * sizeof(unsigned); unsigned* blockNumMap = (unsigned*)_alloca(blockNumBytes); memset(blockNumMap, 0, blockNumBytes); unsigned newBBnum = 1; for (BasicBlock* const block : Blocks()) { assert((block->bbFlags & BBF_REMOVED) == 0); assert(1 <= block->bbNum && block->bbNum <= bbNumMax); assert(blockNumMap[block->bbNum] == 0); // If this fails, we have two blocks with the same block number. blockNumMap[block->bbNum] = newBBnum++; } // Note that there may be some blockNumMap[x] == 0, for a block number 'x' that has been deleted, if the blocks // haven't been renumbered since the deletion. #if 0 // Useful for debugging, but don't want to put this in the dump all the time if (verbose) { printf("fgVerifyHandlerTab block number map: BB current => BB new\n"); for (unsigned i = 0; i <= bbNumMax; i++) { if (blockNumMap[i] != 0) { printf(FMT_BB " => " FMT_BB "\n", i, blockNumMap[i]); } } } #endif // To verify that bbCatchTyp is set properly on all blocks, and that some BBF_* flags are only set on the first // block of 'try' or handlers, create two bool arrays indexed by block number: one for the set of blocks that // are the beginning blocks of 'try' regions, and one for blocks that are the beginning of handlers (including // filters). Note that since this checking function runs before EH normalization, we have to handle the case // where blocks can be both the beginning of a 'try' as well as the beginning of a handler. After we've iterated // over the EH table, loop over all blocks and verify that only handler begin blocks have bbCatchTyp == BBCT_NONE, // and some other things. size_t blockBoolSetBytes = (bbNumMax + 1) * sizeof(bool); bool* blockTryBegSet = (bool*)_alloca(blockBoolSetBytes); bool* blockHndBegSet = (bool*)_alloca(blockBoolSetBytes); for (unsigned i = 0; i <= bbNumMax; i++) { blockTryBegSet[i] = false; blockHndBegSet[i] = false; } #if defined(FEATURE_EH_FUNCLETS) bool isLegalFirstFunclet = false; unsigned bbNumFirstFunclet = 0; if (fgFuncletsCreated) { // Assert some things about the "first funclet block" pointer. assert(fgFirstFuncletBB != nullptr); assert((fgFirstFuncletBB->bbFlags & BBF_REMOVED) == 0); bbNumFirstFunclet = blockNumMap[fgFirstFuncletBB->bbNum]; assert(bbNumFirstFunclet != 0); } else { assert(fgFirstFuncletBB == nullptr); } #endif // FEATURE_EH_FUNCLETS for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { unsigned bbNumTryBeg = blockNumMap[HBtab->ebdTryBeg->bbNum]; unsigned bbNumTryLast = blockNumMap[HBtab->ebdTryLast->bbNum]; unsigned bbNumHndBeg = blockNumMap[HBtab->ebdHndBeg->bbNum]; unsigned bbNumHndLast = blockNumMap[HBtab->ebdHndLast->bbNum]; unsigned bbNumFilter = 0; // This should never get used except under "if (HBtab->HasFilter())" if (HBtab->HasFilter()) { bbNumFilter = blockNumMap[HBtab->ebdFilter->bbNum]; } // Assert that the EH blocks are in the main block list assert(bbNumTryBeg != 0); assert(bbNumTryLast != 0); assert(bbNumHndBeg != 0); assert(bbNumHndLast != 0); if (HBtab->HasFilter()) { assert(bbNumFilter != 0); } // Check relative ordering of the 'beg' and 'last' blocks. Note that in IL (and in our initial block list) // there is no required ordering between the 'try' and handler regions: the handler might come first! 
// After funclets have been created, all the handler blocks come in sequence at the end of the // function (this is checked below, with checks for the first funclet block). Note that a handler // might contain a nested 'try', which will also then be in the "funclet region". // Also, the 'try' and handler regions do not need to be adjacent. assert(bbNumTryBeg <= bbNumTryLast); assert(bbNumHndBeg <= bbNumHndLast); if (HBtab->HasFilter()) { // Since the filter block must be different from the handler, this condition is "<", not "<=". assert(bbNumFilter < bbNumHndBeg); } // The EH regions are disjoint: the handler (including the filter, if applicable) is strictly before or after // the 'try'. if (HBtab->HasFilter()) { assert((bbNumHndLast < bbNumTryBeg) || (bbNumTryLast < bbNumFilter)); } else { assert((bbNumHndLast < bbNumTryBeg) || (bbNumTryLast < bbNumHndBeg)); } #if defined(FEATURE_EH_FUNCLETS) // If funclets have been created, check the first funclet block. The first funclet block must be the // first block of a filter or handler. All filter/handler blocks must come after it. // Note that 'try' blocks might come either before or after it. If after, they will be nested within // a handler. If before, they might be nested within a try, but not within a handler. if (fgFuncletsCreated) { if (bbNumTryLast < bbNumFirstFunclet) { // This EH region can't be nested in a handler, or else it would be in the funclet region. assert(HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); } else { // The last block of the 'try' is in the funclet region; make sure the whole thing is. if (multipleBegBlockNormalizationDone) { assert(bbNumTryBeg > bbNumFirstFunclet); // ">" because a 'try' can't be the first block of a // handler (by EH normalization). } else { assert(bbNumTryBeg >= bbNumFirstFunclet); } // This EH region must be nested in a handler. assert(HBtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX); } if (HBtab->HasFilter()) { assert(bbNumFirstFunclet <= bbNumFilter); if (fgFirstFuncletBB == HBtab->ebdFilter) { assert(!isLegalFirstFunclet); // We can't have already found a matching block for the first funclet. isLegalFirstFunclet = true; } } else { assert(bbNumFirstFunclet <= bbNumHndBeg); if (fgFirstFuncletBB == HBtab->ebdHndBeg) { assert(!isLegalFirstFunclet); // We can't have already found a matching block for the first funclet. isLegalFirstFunclet = true; } } } #endif // FEATURE_EH_FUNCLETS // Check the 'try' region nesting, using ebdEnclosingTryIndex. // Only check one level of nesting, since we'll check the outer EH region (and its nesting) when we get to it // later. if (HBtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) { assert(HBtab->ebdEnclosingTryIndex > XTnum); // The enclosing region must come after this one in the table EHblkDsc* HBtabOuter = ehGetDsc(HBtab->ebdEnclosingTryIndex); unsigned bbNumOuterTryBeg = blockNumMap[HBtabOuter->ebdTryBeg->bbNum]; unsigned bbNumOuterTryLast = blockNumMap[HBtabOuter->ebdTryLast->bbNum]; // A few basic asserts (that will also get covered later, when this outer region gets handled). assert(bbNumOuterTryBeg != 0); assert(bbNumOuterTryLast != 0); assert(bbNumOuterTryBeg <= bbNumOuterTryLast); if (!EHblkDsc::ebdIsSameTry(HBtab, HBtabOuter)) { // If it's not a mutually protect region, then the outer 'try' must completely lexically contain all the // blocks in the nested EH region. 
However, if funclets have been created, this is no longer true, since // this 'try' might be in a handler that is pulled out to the funclet region, while the outer 'try' // remains in the main function region. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // If both the 'try' region and the outer 'try' region are in the main function area, then we can // do the normal nesting check. Otherwise, it's harder to find a useful assert to make about their // relationship. if ((bbNumTryLast < bbNumFirstFunclet) && (bbNumOuterTryLast < bbNumFirstFunclet)) { if (multipleBegBlockNormalizationDone) { assert(bbNumOuterTryBeg < bbNumTryBeg); // Two 'try' regions can't start at the same // block (by EH normalization). } else { assert(bbNumOuterTryBeg <= bbNumTryBeg); } if (multipleLastBlockNormalizationDone) { assert(bbNumTryLast < bbNumOuterTryLast); // Two 'try' regions can't end at the same block //(by EH normalization). } else { assert(bbNumTryLast <= bbNumOuterTryLast); } } // With funclets, all we can say about the handler blocks is that they are disjoint from the // enclosing try. assert((bbNumHndLast < bbNumOuterTryBeg) || (bbNumOuterTryLast < bbNumHndBeg)); } else #endif // FEATURE_EH_FUNCLETS { if (multipleBegBlockNormalizationDone) { assert(bbNumOuterTryBeg < bbNumTryBeg); // Two 'try' regions can't start at the same block // (by EH normalization). } else { assert(bbNumOuterTryBeg <= bbNumTryBeg); } assert(bbNumOuterTryBeg < bbNumHndBeg); // An inner handler can never start at the same // block as an outer 'try' (by IL rules). if (multipleLastBlockNormalizationDone) { // An inner EH region can't share a 'last' block with the outer 'try' (by EH normalization). assert(bbNumTryLast < bbNumOuterTryLast); assert(bbNumHndLast < bbNumOuterTryLast); } else { assert(bbNumTryLast <= bbNumOuterTryLast); assert(bbNumHndLast <= bbNumOuterTryLast); } } } } // Check the handler region nesting, using ebdEnclosingHndIndex. // Only check one level of nesting, since we'll check the outer EH region (and its nesting) when we get to it // later. if (HBtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) { assert(HBtab->ebdEnclosingHndIndex > XTnum); // The enclosing region must come after this one in the table EHblkDsc* HBtabOuter = ehGetDsc(HBtab->ebdEnclosingHndIndex); unsigned bbNumOuterHndBeg = blockNumMap[HBtabOuter->ebdHndBeg->bbNum]; unsigned bbNumOuterHndLast = blockNumMap[HBtabOuter->ebdHndLast->bbNum]; // A few basic asserts (that will also get covered later, when this outer regions gets handled). assert(bbNumOuterHndBeg != 0); assert(bbNumOuterHndLast != 0); assert(bbNumOuterHndBeg <= bbNumOuterHndLast); // The outer handler must completely contain all the blocks in the EH region nested within it. However, if // funclets have been created, it's harder to make any relationship asserts about the order of nested // handlers, which also have been made into funclets. #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { if (handlerBegIsTryBegNormalizationDone) { assert(bbNumOuterHndBeg < bbNumTryBeg); // An inner 'try' can't start at the same block as an // outer handler (by EH normalization). } else { assert(bbNumOuterHndBeg <= bbNumTryBeg); } if (multipleLastBlockNormalizationDone) { assert(bbNumTryLast < bbNumOuterHndLast); // An inner 'try' can't end at the same block as an // outer handler (by EH normalization). 
} else { assert(bbNumTryLast <= bbNumOuterHndLast); } // With funclets, all we can say about the handler blocks is that they are disjoint from the enclosing // handler. assert((bbNumHndLast < bbNumOuterHndBeg) || (bbNumOuterHndLast < bbNumHndBeg)); } else #endif // FEATURE_EH_FUNCLETS { if (handlerBegIsTryBegNormalizationDone) { assert(bbNumOuterHndBeg < bbNumTryBeg); // An inner 'try' can't start at the same block as an // outer handler (by EH normalization). } else { assert(bbNumOuterHndBeg <= bbNumTryBeg); } assert(bbNumOuterHndBeg < bbNumHndBeg); // An inner handler can never start at the same block // as an outer handler (by IL rules). if (multipleLastBlockNormalizationDone) { // An inner EH region can't share a 'last' block with the outer handler (by EH normalization). assert(bbNumTryLast < bbNumOuterHndLast); assert(bbNumHndLast < bbNumOuterHndLast); } else { assert(bbNumTryLast <= bbNumOuterHndLast); assert(bbNumHndLast <= bbNumOuterHndLast); } } } // Set up blockTryBegSet and blockHndBegSet. // We might want to have this assert: // if (fgNormalizeEHDone) assert(!blockTryBegSet[HBtab->ebdTryBeg->bbNum]); // But we can't, because if we have mutually-protect 'try' regions, we'll see exactly the same tryBeg twice // (or more). blockTryBegSet[HBtab->ebdTryBeg->bbNum] = true; assert(!blockHndBegSet[HBtab->ebdHndBeg->bbNum]); blockHndBegSet[HBtab->ebdHndBeg->bbNum] = true; if (HBtab->HasFilter()) { assert(HBtab->ebdFilter->bbCatchTyp == BBCT_FILTER); assert(!blockHndBegSet[HBtab->ebdFilter->bbNum]); blockHndBegSet[HBtab->ebdFilter->bbNum] = true; } // Check the block bbCatchTyp for this EH region's filter and handler. if (HBtab->HasFilter()) { assert(HBtab->ebdHndBeg->bbCatchTyp == BBCT_FILTER_HANDLER); } else if (HBtab->HasCatchHandler()) { assert((HBtab->ebdHndBeg->bbCatchTyp != BBCT_NONE) && (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FAULT) && (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FINALLY) && (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FILTER) && (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FILTER_HANDLER)); } else if (HBtab->HasFaultHandler()) { assert(HBtab->ebdHndBeg->bbCatchTyp == BBCT_FAULT); } else if (HBtab->HasFinallyHandler()) { assert(HBtab->ebdHndBeg->bbCatchTyp == BBCT_FINALLY); } } #if defined(FEATURE_EH_FUNCLETS) assert(!fgFuncletsCreated || isLegalFirstFunclet); #endif // FEATURE_EH_FUNCLETS // Figure out what 'try' and handler index each basic block should have, // and check the blocks against that. This depends on the more nested EH // clauses appearing first. For duplicate clauses, we use the duplicate // clause 'try' region to set the try index, since a handler that has // been pulled out of an enclosing 'try' wouldn't have had its try index // otherwise set. The duplicate clause handler is truly a duplicate of // a previously processed handler, so we ignore it. BasicBlock* block; size_t blockIndexBytes = (bbNumMax + 1) * sizeof(unsigned short); unsigned short* blockTryIndex = (unsigned short*)_alloca(blockIndexBytes); unsigned short* blockHndIndex = (unsigned short*)_alloca(blockIndexBytes); memset(blockTryIndex, 0, blockIndexBytes); memset(blockHndIndex, 0, blockIndexBytes); for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { BasicBlock* blockEnd; for (block = HBtab->ebdTryBeg, blockEnd = HBtab->ebdTryLast->bbNext; block != blockEnd; block = block->bbNext) { if (blockTryIndex[block->bbNum] == 0) { blockTryIndex[block->bbNum] = (unsigned short)(XTnum + 1); } } for (block = (HBtab->HasFilter() ? 
HBtab->ebdFilter : HBtab->ebdHndBeg), blockEnd = HBtab->ebdHndLast->bbNext; block != blockEnd; block = block->bbNext) { if (blockHndIndex[block->bbNum] == 0) { blockHndIndex[block->bbNum] = (unsigned short)(XTnum + 1); } } } #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // Mark all the funclet 'try' indices correctly, since they do not exist in the linear 'try' region that // we looped over above. This is similar to duplicate clause logic, but we only need to look at the most // nested enclosing try index, not the entire set of enclosing try indices, since that is what we store // on the block. for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { unsigned enclosingTryIndex = ehTrueEnclosingTryIndexIL(XTnum); // find the true enclosing try index, // ignoring 'mutual protect' trys if (enclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) { // The handler funclet for 'XTnum' has a try index of 'enclosingTryIndex' (at least, the parts of the // funclet that don't already have a more nested 'try' index because a 'try' is nested within the // handler). BasicBlock* blockEnd; for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg), blockEnd = HBtab->ebdHndLast->bbNext; block != blockEnd; block = block->bbNext) { if (blockTryIndex[block->bbNum] == 0) { blockTryIndex[block->bbNum] = (unsigned short)(enclosingTryIndex + 1); } } } } } #endif // FEATURE_EH_FUNCLETS // Make sure that all blocks have the right index, including those blocks that should have zero (no EH region). for (BasicBlock* const block : Blocks()) { assert(block->bbTryIndex == blockTryIndex[block->bbNum]); assert(block->bbHndIndex == blockHndIndex[block->bbNum]); // Also, since we're walking the blocks, check that all blocks we didn't mark as EH handler 'begin' blocks // already have bbCatchTyp set properly. if (!blockHndBegSet[block->bbNum]) { assert(block->bbCatchTyp == BBCT_NONE); #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // Make sure blocks that aren't the first block of a funclet do not have the BBF_FUNCLET_BEG flag set. assert((block->bbFlags & BBF_FUNCLET_BEG) == 0); } #endif // FEATURE_EH_FUNCLETS } // Only the first block of 'try' regions should have BBF_TRY_BEG set. if (!blockTryBegSet[block->bbNum]) { assert((block->bbFlags & BBF_TRY_BEG) == 0); } } } void Compiler::fgDispHandlerTab() { printf("\n*************** Exception Handling table"); if (compHndBBtabCount == 0) { printf(" is empty\n"); return; } printf("\nindex "); #if !defined(FEATURE_EH_FUNCLETS) printf("nest, "); #endif // !FEATURE_EH_FUNCLETS printf("eTry, eHnd\n"); unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { HBtab->DispEntry(XTnum); } } #endif // DEBUG /*****************************************************************************/ /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX "Compiler" functions: EH tree verification XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /***************************************************************************** * The following code checks the following rules for the EH table: * 1. Overlapping of try blocks not allowed. * 2. Handler blocks cannot be shared between different try blocks. * 3. 
Try blocks with Finally or Fault blocks cannot have other handlers. * 4. If block A contains block B, A should also contain B's try/filter/handler. * 5. A block cannot contain it's related try/filter/handler. * 6. Nested block must appear before containing block * */ void Compiler::verInitEHTree(unsigned numEHClauses) { ehnNext = new (this, CMK_BasicBlock) EHNodeDsc[numEHClauses * 3]; ehnTree = nullptr; } /* Inserts the try, handler and filter (optional) clause information in a tree structure * in order to catch incorrect eh formatting (e.g. illegal overlaps, incorrect order) */ void Compiler::verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab) { EHNodeDsc* tryNode = ehnNext++; EHNodeDsc* handlerNode = ehnNext++; EHNodeDsc* filterNode = nullptr; // optional tryNode->ehnSetTryNodeType(); tryNode->ehnStartOffset = clause->TryOffset; tryNode->ehnEndOffset = clause->TryOffset + clause->TryLength - 1; tryNode->ehnHandlerNode = handlerNode; if (clause->Flags & CORINFO_EH_CLAUSE_FINALLY) { handlerNode->ehnSetFinallyNodeType(); } else if (clause->Flags & CORINFO_EH_CLAUSE_FAULT) { handlerNode->ehnSetFaultNodeType(); } else { handlerNode->ehnSetHandlerNodeType(); } handlerNode->ehnStartOffset = clause->HandlerOffset; handlerNode->ehnEndOffset = clause->HandlerOffset + clause->HandlerLength - 1; handlerNode->ehnTryNode = tryNode; if (clause->Flags & CORINFO_EH_CLAUSE_FILTER) { filterNode = ehnNext++; filterNode->ehnStartOffset = clause->FilterOffset; BasicBlock* blk = handlerTab->BBFilterLast(); filterNode->ehnEndOffset = blk->bbCodeOffsEnd - 1; noway_assert(filterNode->ehnEndOffset != 0); filterNode->ehnSetFilterNodeType(); filterNode->ehnTryNode = tryNode; tryNode->ehnFilterNode = filterNode; } verInsertEhNodeInTree(&ehnTree, tryNode); verInsertEhNodeInTree(&ehnTree, handlerNode); if (filterNode) { verInsertEhNodeInTree(&ehnTree, filterNode); } } /* The root node could be changed by this method. node is inserted to (a) right of root (root.right <-- node) (b) left of root (node.right <-- root; node becomes root) (c) child of root (root.child <-- node) (d) parent of root (node.child <-- root; node becomes root) (e) equivalent of root (root.equivalent <-- node) such that siblings are ordered from left to right child parent relationship and equivalence relationship are not violated Here is a list of all possible cases Case 1 2 3 4 5 6 7 8 9 10 11 12 13 | | | | | | | | | | .......|.|.|.|..................... [ root start ] ..... | | | | | | | | | | | | | | r| | | | | | | | o| | | | | | o| | | | | | t| | | | | | | | | | | | | | | | | | | | | |..........|.|.|.|.....|........|.. [ root end ] ........ 
| | | | | | | | | | | | | | |<-- - - - n o d e - - - -->| Case Operation -------------- 1 (b) 2 Error 3 Error 4 (d) 5 (d) 6 (d) 7 Error 8 Error 9 (a) 10 (c) 11 (c) 12 (c) 13 (e) */ void Compiler::verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node) { unsigned nStart = node->ehnStartOffset; unsigned nEnd = node->ehnEndOffset; if (nStart > nEnd) { BADCODE("start offset greater or equal to end offset"); } node->ehnNext = nullptr; node->ehnChild = nullptr; node->ehnEquivalent = nullptr; while (true) { if (*ppRoot == nullptr) { *ppRoot = node; break; } unsigned rStart = (*ppRoot)->ehnStartOffset; unsigned rEnd = (*ppRoot)->ehnEndOffset; if (nStart < rStart) { // Case 1 if (nEnd < rStart) { // Left sibling node->ehnNext = *ppRoot; *ppRoot = node; return; } // Case 2, 3 if (nEnd < rEnd) { //[Error] BADCODE("Overlapping try regions"); } // Case 4, 5 //[Parent] verInsertEhNodeParent(ppRoot, node); return; } // Cases 6 - 13 (nStart >= rStart) if (nEnd > rEnd) { // Case 6, 7, 8, 9 // Case 9 if (nStart > rEnd) { //[RightSibling] // Recurse with Root.Sibling as the new root ppRoot = &((*ppRoot)->ehnNext); continue; } // Case 6 if (nStart == rStart) { //[Parent] if (node->ehnIsTryBlock() || (*ppRoot)->ehnIsTryBlock()) { verInsertEhNodeParent(ppRoot, node); return; } // non try blocks are not allowed to start at the same offset BADCODE("Handlers start at the same offset"); } // Case 7, 8 BADCODE("Overlapping try regions"); } // Case 10-13 (nStart >= rStart && nEnd <= rEnd) if ((nStart != rStart) || (nEnd != rEnd)) { // Cases 10,11,12 //[Child] if ((*ppRoot)->ehnIsTryBlock()) { BADCODE("Inner try appears after outer try in exception handling table"); } else { // We have an EH clause nested within a handler, but the parent // handler clause came first in the table. The rest of the compiler // doesn't expect this, so sort the EH table. fgNeedToSortEHTable = true; // Case 12 (nStart == rStart) // non try blocks are not allowed to start at the same offset if ((nStart == rStart) && !node->ehnIsTryBlock()) { BADCODE("Handlers start at the same offset"); } // check this! ppRoot = &((*ppRoot)->ehnChild); continue; } } // Case 13 //[Equivalent] if (!node->ehnIsTryBlock() && !(*ppRoot)->ehnIsTryBlock()) { BADCODE("Handlers cannot be shared"); } if (!node->ehnIsTryBlock() || !(*ppRoot)->ehnIsTryBlock()) { // Equivalent is only allowed for try bodies // If one is a handler, this means the nesting is wrong BADCODE("Handler and try with the same offset"); } node->ehnEquivalent = node->ehnNext = *ppRoot; // check that the corresponding handler is either a catch handler // or a filter if (node->ehnHandlerNode->ehnIsFaultBlock() || node->ehnHandlerNode->ehnIsFinallyBlock() || (*ppRoot)->ehnHandlerNode->ehnIsFaultBlock() || (*ppRoot)->ehnHandlerNode->ehnIsFinallyBlock()) { BADCODE("Try block with multiple non-filter/non-handler blocks"); } break; } } /********************************************************************** * Make node the parent of *ppRoot. 
All siblings of *ppRoot that are * fully or partially nested in node remain siblings of *ppRoot */ void Compiler::verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node) { noway_assert(node->ehnNext == nullptr); noway_assert(node->ehnChild == nullptr); // Root is nested in Node noway_assert(node->ehnStartOffset <= (*ppRoot)->ehnStartOffset); noway_assert(node->ehnEndOffset >= (*ppRoot)->ehnEndOffset); // Root is not the same as Node noway_assert(node->ehnStartOffset != (*ppRoot)->ehnStartOffset || node->ehnEndOffset != (*ppRoot)->ehnEndOffset); if (node->ehnIsFilterBlock()) { BADCODE("Protected block appearing within filter block"); } EHNodeDsc* lastChild = nullptr; EHNodeDsc* sibling = (*ppRoot)->ehnNext; while (sibling) { // siblings are ordered left to right, largest right. // nodes have a width of at least one. // Hence sibling start will always be after Node start. noway_assert(sibling->ehnStartOffset > node->ehnStartOffset); // (1) // disjoint if (sibling->ehnStartOffset > node->ehnEndOffset) { break; } // partial containment. if (sibling->ehnEndOffset > node->ehnEndOffset) // (2) { BADCODE("Overlapping try regions"); } // else full containment (follows from (1) and (2)) lastChild = sibling; sibling = sibling->ehnNext; } // All siblings of Root up to and including lastChild will continue to be // siblings of Root (and children of Node). The node to the right of // lastChild will become the first sibling of Node. // if (lastChild) { // Node has more than one child including Root node->ehnNext = lastChild->ehnNext; lastChild->ehnNext = nullptr; } else { // Root is the only child of Node node->ehnNext = (*ppRoot)->ehnNext; (*ppRoot)->ehnNext = nullptr; } node->ehnChild = *ppRoot; *ppRoot = node; } /***************************************************************************** * Checks the following two conditions: * 1) If block A contains block B, A should also contain B's try/filter/handler. * 2) A block cannot contain its related try/filter/handler. * Both these conditions are checked by making sure that all the blocks for an * exception clause are at the same level. * The algorithm is: for each exception clause, determine the first block and * search through the next links for its corresponding try/handler/filter as the * case may be. If not found, then fail. */ void Compiler::verCheckNestingLevel(EHNodeDsc* root) { EHNodeDsc* ehnNode = root; #define exchange(a, b) \ { \ temp = a; \ a = b; \ b = temp; \ } for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++) { EHNodeDsc *p1, *p2, *p3, *temp, *search; p1 = ehnNode++; p2 = ehnNode++; // we are relying on the fact that ehn nodes are allocated sequentially. 
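// (Descriptive note: verInsertEhNode allocates the nodes for each clause in the order try, handler, and
// optionally filter, so 'p1' should be this clause's try node and 'p2' its handler node; the asserts below
// check exactly that pairing.)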
noway_assert(p1->ehnHandlerNode == p2); noway_assert(p2->ehnTryNode == p1); // arrange p1 and p2 in sequential order if (p1->ehnStartOffset == p2->ehnStartOffset) { BADCODE("shared exception handler"); } if (p1->ehnStartOffset > p2->ehnStartOffset) exchange(p1, p2); temp = p1->ehnNext; unsigned numSiblings = 0; search = p2; if (search->ehnEquivalent) { search = search->ehnEquivalent; } do { if (temp == search) { numSiblings++; break; } if (temp) { temp = temp->ehnNext; } } while (temp); CORINFO_EH_CLAUSE clause; info.compCompHnd->getEHinfo(info.compMethodHnd, XTnum, &clause); if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { p3 = ehnNode++; noway_assert(p3->ehnTryNode == p1 || p3->ehnTryNode == p2); noway_assert(p1->ehnFilterNode == p3 || p2->ehnFilterNode == p3); if (p3->ehnStartOffset < p1->ehnStartOffset) { temp = p3; search = p1; } else if (p3->ehnStartOffset < p2->ehnStartOffset) { temp = p1; search = p3; } else { temp = p2; search = p3; } if (search->ehnEquivalent) { search = search->ehnEquivalent; } do { if (temp == search) { numSiblings++; break; } temp = temp->ehnNext; } while (temp); } else { numSiblings++; } if (numSiblings != 2) { BADCODE("Outer block does not contain all code in inner handler"); } } } #if defined(FEATURE_EH_FUNCLETS) #if defined(TARGET_ARM) /***************************************************************************** * We just removed a BBJ_CALLFINALLY/BBJ_ALWAYS pair. If this was the only such pair * targeting the BBJ_ALWAYS target, then we need to clear the BBF_FINALLY_TARGET bit * so that target can also be removed. 'block' is the finally target. Since we just * removed the BBJ_ALWAYS, it better have the BBF_FINALLY_TARGET bit set. */ void Compiler::fgClearFinallyTargetBit(BasicBlock* block) { assert(fgComputePredsDone); assert((block->bbFlags & BBF_FINALLY_TARGET) != 0); for (BasicBlock* const predBlock : block->PredBlocks()) { if (predBlock->bbJumpKind == BBJ_ALWAYS && predBlock->bbJumpDest == block) { BasicBlock* pPrev = predBlock->bbPrev; if (pPrev != nullptr) { if (pPrev->bbJumpKind == BBJ_CALLFINALLY) { // We found a BBJ_CALLFINALLY / BBJ_ALWAYS that still points to this finally target return; } } } } // Didn't find any BBJ_CALLFINALLY / BBJ_ALWAYS that still points here, so clear the bit block->bbFlags &= ~BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /***************************************************************************** * Is this an intra-handler control flow edge? * * 'block' is the head block of a funclet/handler region, or . * 'predBlock' is a predecessor block of 'block' in the predecessor list. * * 'predBlock' can legally only be one of three things: * 1. in the same handler region (e.g., the source of a back-edge of a loop from * 'predBlock' to 'block'), including in nested regions within the handler, * 2. if 'block' begins a handler that is a filter-handler, 'predBlock' must be in the 'filter' region, * 3. for other handlers, 'predBlock' must be in the 'try' region corresponding to handler (or any * region nested in the 'try' region). * * Note that on AMD64/ARM64, the BBJ_CALLFINALLY block that calls a finally handler is not * within the corresponding 'try' region: it is placed in the corresponding 'try' region's * parent (which might be the main function body). This is how it is represented to the VM * (with a special "cloned finally" EH table entry). * * Return 'true' for case #1, and 'false' otherwise. 
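 * (Illustrative sketch, not part of the original comment; block names are hypothetical: if handler H protects
 *  try region T, a back-edge H2 -> H1 between two blocks inside H is case #1 and returns 'true', while an edge
 *  from a block inside T to the first block of H is case #3 and returns 'false'.)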
*/ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) { // Some simple preconditions (as stated above) assert(!fgFuncletsCreated); assert(fgGetPredForBlock(block, predBlock) != nullptr); assert(block->hasHndIndex()); EHblkDsc* xtab = ehGetDsc(block->getHndIndex()); #if FEATURE_EH_CALLFINALLY_THUNKS if (xtab->HasFinallyHandler()) { assert((xtab->ebdHndBeg == block) || // The normal case ((xtab->ebdHndBeg->bbNext == block) && (xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're // trying to decide how to split up the predecessor edges. if (predBlock->bbJumpKind == BBJ_CALLFINALLY) { assert(predBlock->bbJumpDest == block); // A BBJ_CALLFINALLY predecessor of the handler can only come from the corresponding try, // not from any EH clauses nested in this handler. However, we represent the BBJ_CALLFINALLY // as being in the 'try' region's parent EH region, which might be the main function body. unsigned tryIndex = xtab->ebdEnclosingTryIndex; if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { assert(!predBlock->hasTryIndex()); } else { assert(predBlock->hasTryIndex()); assert(tryIndex == predBlock->getTryIndex()); assert(ehGetDsc(tryIndex)->InTryRegionBBRange(predBlock)); } return false; } } #endif // FEATURE_EH_CALLFINALLY_THUNKS assert(predBlock->hasHndIndex() || predBlock->hasTryIndex()); // We could search the try region looking for predBlock by using bbInTryRegions // but that does a lexical search for the block, and then assumes funclets // have been created and does a lexical search of all funclets that were pulled // out of the parent try region. // First, funclets haven't been created yet, and even if they had been, we shouldn't // have any funclet directly branching to another funclet (they have to return first). // So we can safely use CheckIsTryRegion instead of bbInTryRegions. // Second, I believe the depth of any EH graph will on average be smaller than the // breadth of the blocks within a try body. Thus it is faster to get our answer by // looping outward over the region graph. However, I have added asserts, as a // precaution, to ensure both algorithms agree. The asserts also check that the only // way to reach the head of a funclet is from the corresponding try body or from // within the funclet (and *not* any nested funclets). if (predBlock->hasTryIndex()) { // Because the EH clauses are listed inside-out, any nested trys will be at a // lower index than the current try and if there's no enclosing try, tryIndex // will terminate at NO_ENCLOSING_INDEX unsigned tryIndex = predBlock->getTryIndex(); while (tryIndex < block->getHndIndex()) { tryIndex = ehGetEnclosingTryIndex(tryIndex); } // tryIndex should enclose predBlock assert((tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) || ehGetDsc(tryIndex)->InTryRegionBBRange(predBlock)); // At this point tryIndex is either block's handler's corresponding try body // or some outer try region that contains both predBlock & block or // NO_ENCLOSING_REGION (because there was no try body that encloses both). if (tryIndex == block->getHndIndex()) { assert(xtab->InTryRegionBBRange(predBlock)); assert(!xtab->InHndRegionBBRange(predBlock)); return false; } // tryIndex should enclose block (and predBlock as previously asserted) assert((tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) || ehGetDsc(tryIndex)->InTryRegionBBRange(block)); } if (xtab->HasFilter()) { // The block is a handler. Check if the pred block is from its filter. 
We only need to // check the end filter flag, as there is only a single filter for any handler, and we // already know predBlock is a predecessor of block. if (predBlock->bbJumpKind == BBJ_EHFILTERRET) { assert(!xtab->InHndRegionBBRange(predBlock)); return false; } } // It is not in our try region (or filter), so it must be within this handler (or try bodies // within this handler) assert(!xtab->InTryRegionBBRange(predBlock)); assert(xtab->InHndRegionBBRange(predBlock)); return true; } /***************************************************************************** * Does this block, first block of a handler region, have any predecessor edges * that are not from its corresponding try region? */ bool Compiler::fgAnyIntraHandlerPreds(BasicBlock* block) { assert(block->hasHndIndex()); assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler for (BasicBlock* const predBlock : block->PredBlocks()) { if (fgIsIntraHandlerPred(predBlock, block)) { // We have a predecessor that is not from our try region return true; } } return false; } #else // !FEATURE_EH_FUNCLETS /***************************************************************************** * * Function called to relocate any and all EH regions. * Only entire consecutive EH regions will be moved and they will be kept together. * Except for the first block, the range can not have any blocks that jump into or out of the region. */ bool Compiler::fgRelocateEHRegions() { bool result = false; // Our return value #ifdef DEBUG if (verbose) printf("*************** In fgRelocateEHRegions()\n"); #endif if (fgCanRelocateEHRegions) { unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Nested EH regions cannot be moved. // Also we don't want to relocate an EH region that has a filter if ((HBtab->ebdHandlerNestingLevel == 0) && !HBtab->HasFilter()) { bool movedTry = false; #if DEBUG bool movedHnd = false; #endif // DEBUG // Only try to move the outermost try region if (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { // Move the entire try region if it can be moved if (HBtab->ebdTryBeg->isRunRarely()) { BasicBlock* bTryLastBB = fgRelocateEHRange(XTnum, FG_RELOCATE_TRY); if (bTryLastBB != NULL) { result = true; movedTry = true; } } #if DEBUG if (verbose && movedTry) { printf("\nAfter relocating an EH try region"); fgDispBasicBlocks(); fgDispHandlerTab(); // Make sure that the predecessor lists are accurate if (expensiveDebugCheckLevel >= 2) { fgDebugCheckBBlist(); } } #endif // DEBUG } // Currently it is not good to move the rarely run handler regions to the end of the method // because fgDetermineFirstColdBlock() must put the start of any handler region in the hot // section. CLANG_FORMAT_COMMENT_ANCHOR; #if 0 // Now try to move the entire handler region if it can be moved. // Don't try to move a finally handler unless we already moved the try region. 
if (HBtab->ebdHndBeg->isRunRarely() && !HBtab->ebdHndBeg->hasTryIndex() && (movedTry || !HBtab->HasFinallyHandler())) { BasicBlock* bHndLastBB = fgRelocateEHRange(XTnum, FG_RELOCATE_HANDLER); if (bHndLastBB != NULL) { result = true; movedHnd = true; } } #endif // 0 #if DEBUG if (verbose && movedHnd) { printf("\nAfter relocating an EH handler region"); fgDispBasicBlocks(); fgDispHandlerTab(); // Make sure that the predecessor lists are accurate if (expensiveDebugCheckLevel >= 2) { fgDebugCheckBBlist(); } } #endif // DEBUG } } } #if DEBUG fgVerifyHandlerTab(); if (verbose && result) { printf("\nAfter fgRelocateEHRegions()"); fgDispBasicBlocks(); fgDispHandlerTab(); // Make sure that the predecessor lists are accurate fgDebugCheckBBlist(); } #endif // DEBUG return result; } #endif // !FEATURE_EH_FUNCLETS /***************************************************************************** * We've inserted a new block before 'block' that should be part of the same EH region as 'block'. * Update the EH table to make this so. Also, set the new block to have the right EH region data * (copy the bbTryIndex, bbHndIndex, and bbCatchTyp from 'block' to the new predecessor, and clear * 'bbCatchTyp' from 'block'). */ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) { assert(block->bbPrev != nullptr); BasicBlock* bPrev = block->bbPrev; bPrev->copyEHRegion(block); // The first block (and only the first block) of a handler has bbCatchTyp set bPrev->bbCatchTyp = block->bbCatchTyp; block->bbCatchTyp = BBCT_NONE; for (EHblkDsc* const HBtab : EHClauses(this)) { /* Multiple pointers in EHblkDsc can point to same block. We can not early out after the first match. */ if (HBtab->ebdTryBeg == block) { #ifdef DEBUG if (verbose) { printf("EH#%u: New first block of try: " FMT_BB "\n", ehGetIndex(HBtab), bPrev->bbNum); } #endif // DEBUG HBtab->ebdTryBeg = bPrev; bPrev->bbFlags |= BBF_TRY_BEG | BBF_DONT_REMOVE; // clear the TryBeg flag unless it begins another try region if (!bbIsTryBeg(block)) { block->bbFlags &= ~BBF_TRY_BEG; } } if (HBtab->ebdHndBeg == block) { #ifdef DEBUG if (verbose) { printf("EH#%u: New first block of handler: " FMT_BB "\n", ehGetIndex(HBtab), bPrev->bbNum); } #endif // DEBUG // The first block of a handler has an artificial extra refcount. Transfer that to the new block. noway_assert(block->countOfInEdges() > 0); block->bbRefs--; HBtab->ebdHndBeg = bPrev; bPrev->bbFlags |= BBF_DONT_REMOVE; #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { assert((block->bbFlags & BBF_FUNCLET_BEG) != 0); bPrev->bbFlags |= BBF_FUNCLET_BEG; block->bbFlags &= ~BBF_FUNCLET_BEG; } #endif // FEATURE_EH_FUNCLETS bPrev->bbRefs++; // If this is a handler for a filter, the last block of the filter will end with // a BBJ_EHFILTERRET block that has a bbJumpDest that jumps to the first block of // its handler. So we need to update it to keep things in sync. 
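// (Illustrative before/after sketch, not from the original comment: if the filter previously ended with
// BBJ_EHFILTERRET jumping to 'block' (the old ebdHndBeg), then after inserting 'bPrev' it must end with
// BBJ_EHFILTERRET jumping to 'bPrev' (the new ebdHndBeg), which is what the code below does.)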
// if (HBtab->HasFilter()) { BasicBlock* bFilterLast = HBtab->BBFilterLast(); assert(bFilterLast != nullptr); assert(bFilterLast->bbJumpKind == BBJ_EHFILTERRET); assert(bFilterLast->bbJumpDest == block); #ifdef DEBUG if (verbose) { printf("EH#%u: Updating bbJumpDest for filter ret block: " FMT_BB " => " FMT_BB "\n", ehGetIndex(HBtab), bFilterLast->bbNum, bPrev->bbNum); } #endif // DEBUG // Change the bbJumpDest for bFilterLast from the old first 'block' to the new first 'bPrev' bFilterLast->bbJumpDest = bPrev; } } if (HBtab->HasFilter() && (HBtab->ebdFilter == block)) { #ifdef DEBUG if (verbose) { printf("EH#%u: New first block of filter: " FMT_BB "\n", ehGetIndex(HBtab), bPrev->bbNum); } #endif // DEBUG // The first block of a filter has an artificial extra refcount. Transfer that to the new block. noway_assert(block->countOfInEdges() > 0); block->bbRefs--; HBtab->ebdFilter = bPrev; bPrev->bbFlags |= BBF_DONT_REMOVE; #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { assert((block->bbFlags & BBF_FUNCLET_BEG) != 0); bPrev->bbFlags |= BBF_FUNCLET_BEG; block->bbFlags &= ~BBF_FUNCLET_BEG; } #endif // FEATURE_EH_FUNCLETS bPrev->bbRefs++; } } } /***************************************************************************** * We've inserted a new block after 'block' that should be part of the same EH region as 'block'. * Update the EH table to make this so. Also, set the new block to have the right EH region data. */ void Compiler::fgExtendEHRegionAfter(BasicBlock* block) { BasicBlock* newBlk = block->bbNext; assert(newBlk != nullptr); newBlk->copyEHRegion(block); newBlk->bbCatchTyp = BBCT_NONE; // Only the first block of a catch has this set, and 'newBlk' can't be the first block of a catch. // TODO-Throughput: if the block is not in an EH region, then we don't need to walk the EH table looking for 'last' // block pointers to update. ehUpdateLastBlocks(block, newBlk); } //------------------------------------------------------------------------ // fgCheckEHCanInsertAfterBlock: Determine if a block can be inserted after // 'blk' and legally be put in the EH region specified by 'regionIndex'. This // can be true if the most nested region the block is in is already 'regionIndex', // as we'll just extend the most nested region (and any region ending at the same block). // It can also be true if it is the end of (a set of) EH regions, such that // inserting the block and properly extending some EH regions (if necessary) // puts the block in the correct region. We only consider the case of extending // an EH region after 'blk' (that is, to include 'blk' and the newly inserted block); // we don't consider inserting a block as the first block of an EH region following 'blk'. // // Consider this example: // // try3 try2 try1 // |--- | | BB01 // | |--- | BB02 // | | |--- BB03 // | | | BB04 // | |--- |--- BB05 // | BB06 // |----------------- BB07 // // Passing BB05 and try1/try2/try3 as the region to insert into (as well as putInTryRegion==true) // will all return 'true'. Here are the cases: // 1. Insert into try1: the most nested EH region BB05 is in is already try1, so we can insert after // it and extend try1 (and try2). // 2. Insert into try2: we can extend try2, but leave try1 alone. // 3. Insert into try3: we can leave try1 and try2 alone, and put the new block just in try3. Note that // in this case, after we "loop outwards" in the EH nesting, we get to a place where we're in the middle // of the try3 region, not at the end of it.
// In all cases, it is possible to put a block after BB05 and put it in any of these three 'try' regions legally. // // Filters are ignored; if 'blk' is in a filter, the answer will be false. // // Arguments: // blk - the BasicBlock we are checking to see if we can insert after. // regionIndex - the EH region we want to insert a block into. regionIndex is // in the range [0..compHndBBtabCount]; 0 means "main method". // putInTryRegion - 'true' if the new block should be inserted in the 'try' region of 'regionIndex'. // For regionIndex 0 (the "main method"), this should be 'true'. // // Return Value: // 'true' if a block can be inserted after 'blk' and put in EH region 'regionIndex', else 'false'. // bool Compiler::fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion) { assert(blk != nullptr); assert(regionIndex <= compHndBBtabCount); if (regionIndex == 0) { assert(putInTryRegion); } bool inTryRegion; unsigned nestedRegionIndex = ehGetMostNestedRegionIndex(blk, &inTryRegion); bool insertOK = true; for (;;) { if (nestedRegionIndex == regionIndex) { // This block is in the region we want to be in. We can insert here if it's the right type of region. // (If we want to be in the 'try' region, but the block is in the handler region, then inserting a // new block after 'blk' can't put it in the 'try' region, and vice-versa, since we only consider // extending regions after, not prepending to regions.) // This check will be 'true' if we are trying to put something in the main function (as putInTryRegion // must be 'true' if regionIndex is zero, and inTryRegion will also be 'true' if nestedRegionIndex is zero). insertOK = (putInTryRegion == inTryRegion); break; } else if (nestedRegionIndex == 0) { // The block is in the main function, but we want to put something in a nested region. We can't do that. insertOK = false; break; } assert(nestedRegionIndex > 0); EHblkDsc* ehDsc = ehGetDsc(nestedRegionIndex - 1); // ehGetDsc uses [0..compHndBBtabCount) form. if (inTryRegion) { if (blk != ehDsc->ebdTryLast) { // Not the last block? Then it must be somewhere else within the try region, so we can't insert here. insertOK = false; break; // exit the 'for' loop } } else { // We ignore filters. if (blk != ehDsc->ebdHndLast) { // Not the last block? Then it must be somewhere else within the handler region, so we can't insert // here. insertOK = false; break; // exit the 'for' loop } } // Things look good for this region; check the enclosing regions, if any. nestedRegionIndex = ehGetEnclosingRegionIndex(nestedRegionIndex - 1, &inTryRegion); // ehGetEnclosingRegionIndex uses [0..compHndBBtabCount) form. // Convert to [0..compHndBBtabCount] form. nestedRegionIndex = (nestedRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : nestedRegionIndex + 1; } // end of for(;;) return insertOK; } //------------------------------------------------------------------------ // fgIsFirstBlockOfFilterOrHandler: return true if the given block is the first block of an EH handler // or filter. // // Arguments: // block - the BasicBlock in question // // Return Value: // As described above. // bool Compiler::fgIsFirstBlockOfFilterOrHandler(BasicBlock* block) { if (!block->hasHndIndex()) { return false; } EHblkDsc* ehDsc = ehGetDsc(block->getHndIndex()); if (ehDsc->ebdHndBeg == block) { return true; } if (ehDsc->HasFilter() && (ehDsc->ebdFilter == block)) { return true; } return false; }
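// (Illustrative usage sketch, not part of the original sources: a debug-only count of filter/handler entry
// blocks could be written with this helper and the Blocks() iterator used elsewhere in this file, e.g.
//
//     unsigned handlerEntryCount = 0;
//     for (BasicBlock* const block : Blocks())
//     {
//         if (fgIsFirstBlockOfFilterOrHandler(block))
//         {
//             handlerEntryCount++;
//         }
//     }
// )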
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Exception Handling XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX "EHblkDsc" functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ BasicBlock* EHblkDsc::BBFilterLast() { noway_assert(HasFilter()); noway_assert(ebdFilter != nullptr); noway_assert(ebdHndBeg != nullptr); // The last block of the filter is the block immediately preceding the first block of the handler. return ebdHndBeg->bbPrev; } BasicBlock* EHblkDsc::ExFlowBlock() { if (HasFilter()) { return ebdFilter; } else { return ebdHndBeg; } } bool EHblkDsc::InTryRegionILRange(BasicBlock* pBlk) { // BBF_INTERNAL blocks may not have a valid bbCodeOffs. This function // should only be used before any BBF_INTERNAL blocks have been added. assert(!(pBlk->bbFlags & BBF_INTERNAL)); return Compiler::jitIsBetween(pBlk->bbCodeOffs, ebdTryBegOffs(), ebdTryEndOffs()); } bool EHblkDsc::InFilterRegionILRange(BasicBlock* pBlk) { // BBF_INTERNAL blocks may not have a valid bbCodeOffs. This function // should only be used before any BBF_INTERNAL blocks have been added. assert(!(pBlk->bbFlags & BBF_INTERNAL)); return HasFilter() && Compiler::jitIsBetween(pBlk->bbCodeOffs, ebdFilterBegOffs(), ebdFilterEndOffs()); } bool EHblkDsc::InHndRegionILRange(BasicBlock* pBlk) { // BBF_INTERNAL blocks may not have a valid bbCodeOffs. This function // should only be used before any BBF_INTERNAL blocks have been added. assert(!(pBlk->bbFlags & BBF_INTERNAL)); return Compiler::jitIsBetween(pBlk->bbCodeOffs, ebdHndBegOffs(), ebdHndEndOffs()); } // HasCatchHandler: returns 'true' for either try/catch, or try/filter/filter-handler. bool EHblkDsc::HasCatchHandler() { return (ebdHandlerType == EH_HANDLER_CATCH) || (ebdHandlerType == EH_HANDLER_FILTER); } bool EHblkDsc::HasFilter() { return ebdHandlerType == EH_HANDLER_FILTER; } bool EHblkDsc::HasFinallyHandler() { return ebdHandlerType == EH_HANDLER_FINALLY; } bool EHblkDsc::HasFaultHandler() { return (ebdHandlerType == EH_HANDLER_FAULT) || (ebdHandlerType == EH_HANDLER_FAULT_WAS_FINALLY); } bool EHblkDsc::HasFinallyOrFaultHandler() { return HasFinallyHandler() || HasFaultHandler(); } /***************************************************************************** * Returns true if pBlk is a block in the range [pStart..pEnd). * The check is inclusive of pStart, exclusive of pEnd. 
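 *  Note that pEnd must follow pStart in the block list (or be nullptr); otherwise the walk below
 *  runs off the end of the list.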
*/ bool EHblkDsc::InBBRange(BasicBlock* pBlk, BasicBlock* pStart, BasicBlock* pEnd) { for (BasicBlock* pWalk = pStart; pWalk != pEnd; pWalk = pWalk->bbNext) { if (pWalk == pBlk) { return true; } } return false; } bool EHblkDsc::InTryRegionBBRange(BasicBlock* pBlk) { return InBBRange(pBlk, ebdTryBeg, ebdTryLast->bbNext); } bool EHblkDsc::InFilterRegionBBRange(BasicBlock* pBlk) { return HasFilter() && InBBRange(pBlk, ebdFilter, ebdHndBeg); } bool EHblkDsc::InHndRegionBBRange(BasicBlock* pBlk) { return InBBRange(pBlk, ebdHndBeg, ebdHndLast->bbNext); } unsigned EHblkDsc::ebdGetEnclosingRegionIndex(bool* inTryRegion) { if ((ebdEnclosingTryIndex == NO_ENCLOSING_INDEX) && (ebdEnclosingHndIndex == NO_ENCLOSING_INDEX)) { return NO_ENCLOSING_INDEX; } else if (ebdEnclosingTryIndex == NO_ENCLOSING_INDEX) { assert(ebdEnclosingHndIndex != NO_ENCLOSING_INDEX); *inTryRegion = false; return ebdEnclosingHndIndex; } else if (ebdEnclosingHndIndex == NO_ENCLOSING_INDEX) { assert(ebdEnclosingTryIndex != NO_ENCLOSING_INDEX); *inTryRegion = true; return ebdEnclosingTryIndex; } else { assert(ebdEnclosingTryIndex != NO_ENCLOSING_INDEX); assert(ebdEnclosingHndIndex != NO_ENCLOSING_INDEX); assert(ebdEnclosingTryIndex != ebdEnclosingHndIndex); if (ebdEnclosingTryIndex < ebdEnclosingHndIndex) { *inTryRegion = true; return ebdEnclosingTryIndex; } else { *inTryRegion = false; return ebdEnclosingHndIndex; } } } /*****************************************************************************/ // We used to assert that the IL offsets in the EH table matched the IL offset stored // on the blocks pointed to by the try/filter/handler block pointers. This is true at // import time, but can fail to be true later in compilation when we start doing // flow optimizations. // // That being said, the IL offsets in the EH table should only be examined early, // during importing. After importing, use block info instead. 
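// For example, an empty BBF_INTERNAL block added by a flow optimization has no meaningful bbCodeOffs;
// the In*RegionILRange helpers above assert that they never see such a block, so once those blocks
// can exist the block-range variants (In*RegionBBRange) are the ones to use.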
IL_OFFSET EHblkDsc::ebdTryBegOffs() { return ebdTryBegOffset; } IL_OFFSET EHblkDsc::ebdTryEndOffs() { return ebdTryEndOffset; } IL_OFFSET EHblkDsc::ebdHndBegOffs() { return ebdHndBegOffset; } IL_OFFSET EHblkDsc::ebdHndEndOffs() { return ebdHndEndOffset; } IL_OFFSET EHblkDsc::ebdFilterBegOffs() { assert(HasFilter()); return ebdFilterBegOffset; } IL_OFFSET EHblkDsc::ebdFilterEndOffs() { assert(HasFilter()); return ebdHndBegOffs(); // end of filter is beginning of handler } /* static */ bool EHblkDsc::ebdIsSameILTry(EHblkDsc* h1, EHblkDsc* h2) { return ((h1->ebdTryBegOffset == h2->ebdTryBegOffset) && (h1->ebdTryEndOffset == h2->ebdTryEndOffset)); } /*****************************************************************************/ /* static */ bool EHblkDsc::ebdIsSameTry(EHblkDsc* h1, EHblkDsc* h2) { return ((h1->ebdTryBeg == h2->ebdTryBeg) && (h1->ebdTryLast == h2->ebdTryLast)); } bool EHblkDsc::ebdIsSameTry(Compiler* comp, unsigned t2) { EHblkDsc* h2 = comp->ehGetDsc(t2); return ebdIsSameTry(this, h2); } bool EHblkDsc::ebdIsSameTry(BasicBlock* ebdTryBeg, BasicBlock* ebdTryLast) { return ((this->ebdTryBeg == ebdTryBeg) && (this->ebdTryLast == ebdTryLast)); } /*****************************************************************************/ #ifdef DEBUG /*****************************************************************************/ void EHblkDsc::DispEntry(unsigned XTnum) { printf(" %2u ::", XTnum); #if !defined(FEATURE_EH_FUNCLETS) printf(" %2u ", XTnum, ebdHandlerNestingLevel); #endif // !FEATURE_EH_FUNCLETS if (ebdEnclosingTryIndex == NO_ENCLOSING_INDEX) { printf(" "); } else { printf(" %2u ", ebdEnclosingTryIndex); } if (ebdEnclosingHndIndex == NO_ENCLOSING_INDEX) { printf(" "); } else { printf(" %2u ", ebdEnclosingHndIndex); } ////////////// ////////////// Protected (try) region ////////////// printf("- Try at " FMT_BB ".." FMT_BB, ebdTryBeg->bbNum, ebdTryLast->bbNum); /* ( brace matching editor workaround to compensate for the following line */ printf(" [%03X..%03X), ", ebdTryBegOffset, ebdTryEndOffset); ////////////// ////////////// Filter region ////////////// if (HasFilter()) { /* ( brace matching editor workaround to compensate for the following line */ printf("Filter at " FMT_BB ".." FMT_BB " [%03X..%03X), ", ebdFilter->bbNum, BBFilterLast()->bbNum, ebdFilterBegOffset, ebdHndBegOffset); } ////////////// ////////////// Handler region ////////////// if (ebdHndBeg->bbCatchTyp == BBCT_FINALLY) { printf("Finally"); } else if (ebdHndBeg->bbCatchTyp == BBCT_FAULT) { printf("Fault "); } else { printf("Handler"); } printf(" at " FMT_BB ".." 
FMT_BB, ebdHndBeg->bbNum, ebdHndLast->bbNum); /* ( brace matching editor workaround to compensate for the following line */ printf(" [%03X..%03X)", ebdHndBegOffset, ebdHndEndOffset); printf("\n"); } /*****************************************************************************/ #endif // DEBUG /*****************************************************************************/ /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX "Compiler" functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ bool Compiler::bbInCatchHandlerILRange(BasicBlock* blk) { EHblkDsc* HBtab = ehGetBlockHndDsc(blk); if (HBtab == nullptr) { return false; } return HBtab->HasCatchHandler() && HBtab->InHndRegionILRange(blk); } bool Compiler::bbInFilterILRange(BasicBlock* blk) { EHblkDsc* HBtab = ehGetBlockHndDsc(blk); if (HBtab == nullptr) { return false; } return HBtab->InFilterRegionILRange(blk); } // Given a handler region, find the innermost try region that contains it. // NOTE: handlerIndex is 1-based (0 means no handler). unsigned short Compiler::bbFindInnermostTryRegionContainingHandlerRegion(unsigned handlerIndex) { if (handlerIndex > 0) { unsigned XTnum; EHblkDsc* ehDsc; BasicBlock* blk = ehGetDsc(handlerIndex - 1)->ebdHndBeg; // handlerIndex is 1 based, therefore our interesting clauses start from clause compHndBBtab[handlerIndex] EHblkDsc* ehDscEnd = compHndBBtab + compHndBBtabCount; for (ehDsc = compHndBBtab + handlerIndex, XTnum = handlerIndex; ehDsc < ehDscEnd; ehDsc++, XTnum++) { if (bbInTryRegions(XTnum, blk)) { noway_assert(XTnum < MAX_XCPTN_INDEX); return (unsigned short)(XTnum + 1); // Return the tryIndex } } } return 0; } // Given a try region, find the innermost handler region that contains it. // NOTE: tryIndex is 1-based (0 means no handler). unsigned short Compiler::bbFindInnermostHandlerRegionContainingTryRegion(unsigned tryIndex) { if (tryIndex > 0) { unsigned XTnum; EHblkDsc* ehDsc; BasicBlock* blk = ehGetDsc(tryIndex - 1)->ebdTryBeg; // tryIndex is 1 based, our interesting clauses start from clause compHndBBtab[tryIndex] EHblkDsc* ehDscEnd = compHndBBtab + compHndBBtabCount; for (ehDsc = compHndBBtab + tryIndex, XTnum = tryIndex; ehDsc < ehDscEnd; ehDsc++, XTnum++) { if (bbInHandlerRegions(XTnum, blk)) { noway_assert(XTnum < MAX_XCPTN_INDEX); return (unsigned short)(XTnum + 1); // Return the handlerIndex } } } return 0; } /* Given a block and a try region index, check to see if the block is within the try body. For this check, a funclet is considered to be in the region it was extracted from. */ bool Compiler::bbInTryRegions(unsigned regionIndex, BasicBlock* blk) { assert(regionIndex < EHblkDsc::NO_ENCLOSING_INDEX); unsigned tryIndex = blk->hasTryIndex() ? blk->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX; // Loop outward until we find an enclosing try that is the same as the one // we are looking for or an outer/later one while (tryIndex < regionIndex) { tryIndex = ehGetEnclosingTryIndex(tryIndex); } // Now we have the index of 2 try bodies, either they match or not! return (tryIndex == regionIndex); } //------------------------------------------------------------------------ // bbInExnFlowRegions: // Check to see if an exception raised in the given block could be // handled by the given region (possibly after inner regions). 
// // Arguments: // regionIndex - Check if this region can handle exceptions from 'blk' // blk - Consider exceptions raised from this block // // Return Value: // true - The region with index 'regionIndex' can handle exceptions from 'blk' // false - The region with index 'regionIndex' can't handle exceptions from 'blk' // // Notes: // For this check, a funclet is considered to be in the region it was // extracted from. bool Compiler::bbInExnFlowRegions(unsigned regionIndex, BasicBlock* blk) { assert(regionIndex < EHblkDsc::NO_ENCLOSING_INDEX); EHblkDsc* ExnFlowRegion = ehGetBlockExnFlowDsc(blk); unsigned tryIndex = (ExnFlowRegion == nullptr ? EHblkDsc::NO_ENCLOSING_INDEX : ehGetIndex(ExnFlowRegion)); // Loop outward until we find an enclosing try that is the same as the one // we are looking for or an outer/later one while (tryIndex < regionIndex) { tryIndex = ehGetEnclosingTryIndex(tryIndex); } // Now we have the index of 2 try bodies, either they match or not! return (tryIndex == regionIndex); } /* Given a block, check to see if it is in the handler block of the EH descriptor. For this check, a funclet is considered to be in the region it was extracted from. */ bool Compiler::bbInHandlerRegions(unsigned regionIndex, BasicBlock* blk) { assert(regionIndex < EHblkDsc::NO_ENCLOSING_INDEX); unsigned hndIndex = blk->hasHndIndex() ? blk->getHndIndex() : EHblkDsc::NO_ENCLOSING_INDEX; // We can't use the same simple trick here because there is no required ordering // of handlers (which also have no required ordering with respect to their try // bodies). while (hndIndex < EHblkDsc::NO_ENCLOSING_INDEX && hndIndex != regionIndex) { hndIndex = ehGetEnclosingHndIndex(hndIndex); } // Now we have the index of 2 try bodies, either they match or not! return (hndIndex == regionIndex); } /* Given a hndBlk, see if it is in one of tryBlk's catch handler regions. Since we create one EHblkDsc for each "catch" of a "try", we might end up with multiple EHblkDsc's that have the same ebdTryBeg and ebdTryLast, but different ebdHndBeg and ebdHndLast. Unfortunately getTryIndex() only returns the index of the first EHblkDsc. E.g. The following example shows that BB02 has a catch in BB03 and another catch in BB04. index nest, enclosing 0 :: 0, 1 - Try at BB01..BB02 [000..008], Handler at BB03 [009..016] 1 :: 0, - Try at BB01..BB02 [000..008], Handler at BB04 [017..022] This function will return true for bbInCatchHandlerRegions(BB02, BB03) and bbInCatchHandlerRegions(BB02, BB04) */ bool Compiler::bbInCatchHandlerRegions(BasicBlock* tryBlk, BasicBlock* hndBlk) { assert(tryBlk->hasTryIndex()); if (!hndBlk->hasHndIndex()) { return false; } unsigned XTnum = tryBlk->getTryIndex(); EHblkDsc* firstEHblkDsc = ehGetDsc(XTnum); EHblkDsc* ehDsc = firstEHblkDsc; // Rather than searching the whole list, take advantage of our sorting. // We will only match against blocks with the same try body (mutually // protect regions). Because of our sort ordering, such regions will // always be immediately adjacent, any nested regions will be before the // first of the set, and any outer regions will be after the last. // Also siblings will be before or after according to their location, // but never in between; while (XTnum > 0) { assert(EHblkDsc::ebdIsSameTry(firstEHblkDsc, ehDsc)); // Stop when the previous region is not mutually protect if (!EHblkDsc::ebdIsSameTry(firstEHblkDsc, ehDsc - 1)) { break; } ehDsc--; XTnum--; } // XTnum and ehDsc are now referring to the first region in the set of // mutually protect regions. 
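    // For example, a C# "try { ... } catch (A) { ... } catch (B) { ... }" produces two adjacent EH
    // table entries with identical 'try' ranges. The backward walk above finds the first entry of
    // that set (usually the one 'tryBlk' already carries), and the loop below then scans forward
    // across the whole mutually-protect set, looking for a catch handler region containing 'hndBlk'.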
    assert(EHblkDsc::ebdIsSameTry(firstEHblkDsc, ehDsc));
    assert((ehDsc == compHndBBtab) || !EHblkDsc::ebdIsSameTry(firstEHblkDsc, ehDsc - 1));

    do
    {
        if (ehDsc->HasCatchHandler() && bbInHandlerRegions(XTnum, hndBlk))
        {
            return true;
        }
        XTnum++;
        ehDsc++;
    } while (XTnum < compHndBBtabCount && EHblkDsc::ebdIsSameTry(firstEHblkDsc, ehDsc));

    return false;
}

/******************************************************************************************
 * Given two blocks, return the inner-most enclosing try region that contains both of them.
 * Return 0 if it does not find any try region (which means the inner-most region
 * is the method itself).
 */

unsigned short Compiler::bbFindInnermostCommonTryRegion(BasicBlock* bbOne, BasicBlock* bbTwo)
{
    unsigned XTnum;

    for (XTnum = 0; XTnum < compHndBBtabCount; XTnum++)
    {
        if (bbInTryRegions(XTnum, bbOne) && bbInTryRegions(XTnum, bbTwo))
        {
            noway_assert(XTnum < MAX_XCPTN_INDEX);
            return (unsigned short)(XTnum + 1); // Return the tryIndex
        }
    }

    return 0;
}

// bbIsTryBeg() returns true if this block is the start of any try region.
// This is computed by examining the current values in the
// EH table rather than just looking at the block->bbFlags.
//
// Note that a block is the beginning of any try region if it is the beginning of the
// most nested try region it is a member of. Thus, we only need to check the EH
// table entry related to the try index stored on the block.
//
bool Compiler::bbIsTryBeg(BasicBlock* block)
{
    EHblkDsc* ehDsc = ehGetBlockTryDsc(block);
    return (ehDsc != nullptr) && (block == ehDsc->ebdTryBeg);
}

// bbIsHandlerBeg() returns true if "block" is the start of any handler or filter.
// Note that if a block is the beginning of a handler or filter, it must be the beginning
// of the most nested handler or filter region it is in. Thus, we only need to look at the EH
// descriptor corresponding to the handler index on the block.
//
bool Compiler::bbIsHandlerBeg(BasicBlock* block)
{
    EHblkDsc* ehDsc = ehGetBlockHndDsc(block);
    return (ehDsc != nullptr) && ((block == ehDsc->ebdHndBeg) || (ehDsc->HasFilter() && (block == ehDsc->ebdFilter)));
}

bool Compiler::bbIsExFlowBlock(BasicBlock* block, unsigned* regionIndex)
{
    if (block->hasHndIndex())
    {
        *regionIndex = block->getHndIndex();
        return block == ehGetDsc(*regionIndex)->ExFlowBlock();
    }
    else
    {
        return false;
    }
}

bool Compiler::ehHasCallableHandlers()
{
#if defined(FEATURE_EH_FUNCLETS)
    // Any EH in the function?
    return compHndBBtabCount > 0;
#else  // !FEATURE_EH_FUNCLETS
    return ehNeedsShadowSPslots();
#endif // !FEATURE_EH_FUNCLETS
}

/******************************************************************************************
 * Determine if 'block' is the last block of an EH 'try' or handler (ignoring filters). If so,
 * return the EH descriptor pointer for that EH region. Otherwise, return nullptr.
*/ EHblkDsc* Compiler::ehIsBlockTryLast(BasicBlock* block) { EHblkDsc* HBtab = ehGetBlockTryDsc(block); if ((HBtab != nullptr) && (HBtab->ebdTryLast == block)) { return HBtab; } return nullptr; } EHblkDsc* Compiler::ehIsBlockHndLast(BasicBlock* block) { EHblkDsc* HBtab = ehGetBlockHndDsc(block); if ((HBtab != nullptr) && (HBtab->ebdHndLast == block)) { return HBtab; } return nullptr; } bool Compiler::ehIsBlockEHLast(BasicBlock* block) { return (ehIsBlockTryLast(block) != nullptr) || (ehIsBlockHndLast(block) != nullptr); } //------------------------------------------------------------------------ // ehGetBlockExnFlowDsc: // Get the EH descriptor for the most nested region (if any) that may // handle exceptions raised in the given block // // Arguments: // block - Consider exceptions raised from this block // // Return Value: // nullptr - The given block's exceptions propagate to caller // non-null - This region is the innermost handler for exceptions raised in // the given block EHblkDsc* Compiler::ehGetBlockExnFlowDsc(BasicBlock* block) { EHblkDsc* hndDesc = ehGetBlockHndDsc(block); if ((hndDesc != nullptr) && hndDesc->InFilterRegionBBRange(block)) { // If an exception is thrown in a filter (or escapes a callee in a filter), // or if exception_continue_search (0/false) is returned at // the end of a filter, the (original) exception is propagated to // the next outer handler. The "next outer handler" is the handler // of the try region enclosing the try that the filter protects. // This may not be the same as the try region enclosing the filter, // e.g. in cases like this: // try { // ... // } filter (filter-part) { // handler-part // } catch { (or finally/fault/filter) // which is represented as two EHblkDscs with the same try range, // the inner protected by a filter and the outer protected by the // other handler; exceptions in the filter-part propagate to the // other handler, even though the other handler's try region does not // enclose the filter. unsigned outerIndex = hndDesc->ebdEnclosingTryIndex; if (outerIndex == EHblkDsc::NO_ENCLOSING_INDEX) { assert(!block->hasTryIndex()); return nullptr; } return ehGetDsc(outerIndex); } return ehGetBlockTryDsc(block); } bool Compiler::ehBlockHasExnFlowDsc(BasicBlock* block) { if (block->hasTryIndex()) { return true; } EHblkDsc* hndDesc = ehGetBlockHndDsc(block); return ((hndDesc != nullptr) && hndDesc->InFilterRegionBBRange(block) && (hndDesc->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX)); } //------------------------------------------------------------------------ // ehGetMostNestedRegionIndex: Return the region index of the most nested EH region this block is in. // The return value is in the range [0..compHndBBtabCount]. It is same scale as bbTryIndex/bbHndIndex: // 0 means main method, N is used as an index to compHndBBtab[N - 1]. If we don't return 0, then // *inTryRegion indicates whether the most nested region for the block is a 'try' clause or // filter/handler clause. For 0 return, *inTryRegion is set to true. // // Arguments: // block - the BasicBlock we want the region index for. // inTryRegion - an out parameter. As described above. // // Return Value: // As described above. 
// unsigned Compiler::ehGetMostNestedRegionIndex(BasicBlock* block, bool* inTryRegion) { assert(block != nullptr); assert(inTryRegion != nullptr); unsigned mostNestedRegion; if (block->bbHndIndex == 0) { mostNestedRegion = block->bbTryIndex; *inTryRegion = true; } else if (block->bbTryIndex == 0) { mostNestedRegion = block->bbHndIndex; *inTryRegion = false; } else { if (block->bbTryIndex < block->bbHndIndex) { mostNestedRegion = block->bbTryIndex; *inTryRegion = true; } else { assert(block->bbTryIndex != block->bbHndIndex); // A block can't be both in the 'try' and 'handler' region // of the same EH region mostNestedRegion = block->bbHndIndex; *inTryRegion = false; } } assert(mostNestedRegion <= compHndBBtabCount); return mostNestedRegion; } /***************************************************************************** * Returns the try index of the enclosing try, skipping all EH regions with the * same try region (that is, all 'mutual protect' regions). If there is no such * enclosing try, returns EHblkDsc::NO_ENCLOSING_INDEX. */ unsigned Compiler::ehTrueEnclosingTryIndexIL(unsigned regionIndex) { assert(regionIndex != EHblkDsc::NO_ENCLOSING_INDEX); EHblkDsc* ehDscRoot = ehGetDsc(regionIndex); EHblkDsc* HBtab = ehDscRoot; for (;;) { regionIndex = HBtab->ebdEnclosingTryIndex; if (regionIndex == EHblkDsc::NO_ENCLOSING_INDEX) { // No enclosing 'try'; we're done break; } HBtab = ehGetDsc(regionIndex); if (!EHblkDsc::ebdIsSameILTry(ehDscRoot, HBtab)) { // Found an enclosing 'try' that has a different 'try' region (is not mutually-protect with the // original region). Return it. break; } } return regionIndex; } unsigned Compiler::ehGetEnclosingRegionIndex(unsigned regionIndex, bool* inTryRegion) { assert(regionIndex != EHblkDsc::NO_ENCLOSING_INDEX); EHblkDsc* ehDsc = ehGetDsc(regionIndex); return ehDsc->ebdGetEnclosingRegionIndex(inTryRegion); } /***************************************************************************** * The argument 'block' has been deleted. Update the EH table so 'block' is no longer listed * as a 'last' block. You can't delete a 'begin' block this way. */ void Compiler::ehUpdateForDeletedBlock(BasicBlock* block) { assert(block->bbFlags & BBF_REMOVED); if (!block->hasTryIndex() && !block->hasHndIndex()) { // The block is not part of any EH region, so there is nothing to do. return; } BasicBlock* bPrev = block->bbPrev; assert(bPrev != nullptr); ehUpdateLastBlocks(block, bPrev); } /***************************************************************************** * Determine if an empty block can be deleted, and still preserve the EH normalization * rules on blocks. * * We only consider the case where the block to be deleted is the last block of a region, * and the region is being contracted such that the previous block will become the new * 'last' block. If this previous block is already a 'last' block, then we can't do the * delete, as that would cause a single block to be the 'last' block of multiple regions. */ bool Compiler::ehCanDeleteEmptyBlock(BasicBlock* block) { assert(block->isEmpty()); return true; #if 0 // This is disabled while the "multiple last block" normalization is disabled if (!fgNormalizeEHDone) { return true; } if (ehIsBlockEHLast(block)) { BasicBlock* bPrev = block->bbPrev; if ((bPrev != nullptr) && ehIsBlockEHLast(bPrev)) { return false; } } return true; #endif // 0 } /***************************************************************************** * The 'last' block of one or more EH regions might have changed. Update the EH table. 
* This can happen if the EH region shrinks, where one or more blocks have been removed * from the region. It can happen if the EH region grows, where one or more blocks * have been added at the end of the region. * * We might like to verify the handler table integrity after doing this update, but we * can't because this might just be one step by the caller in a transformation back to * a legal state. * * Arguments: * oldLast -- Search for this block as the 'last' block of one or more EH regions. * newLast -- If 'oldLast' is found to be the 'last' block of an EH region, replace it by 'newLast'. */ void Compiler::ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast) { for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->ebdTryLast == oldLast) { fgSetTryEnd(HBtab, newLast); } if (HBtab->ebdHndLast == oldLast) { fgSetHndEnd(HBtab, newLast); } } } unsigned Compiler::ehGetCallFinallyRegionIndex(unsigned finallyIndex, bool* inTryRegion) { assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); #if defined(TARGET_AMD64) || defined(TARGET_ARM64) return ehGetDsc(finallyIndex)->ebdGetEnclosingRegionIndex(inTryRegion); #else *inTryRegion = true; return finallyIndex; #endif } void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** begBlk, BasicBlock** endBlk) { assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); assert(begBlk != nullptr); assert(endBlk != nullptr); #if FEATURE_EH_CALLFINALLY_THUNKS bool inTryRegion; unsigned callFinallyRegionIndex = ehGetCallFinallyRegionIndex(finallyIndex, &inTryRegion); if (callFinallyRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX) { *begBlk = fgFirstBB; *endBlk = fgEndBBAfterMainFunction(); } else { EHblkDsc* ehDsc = ehGetDsc(callFinallyRegionIndex); if (inTryRegion) { *begBlk = ehDsc->ebdTryBeg; *endBlk = ehDsc->ebdTryLast->bbNext; } else { *begBlk = ehDsc->ebdHndBeg; *endBlk = ehDsc->ebdHndLast->bbNext; } } #else // !FEATURE_EH_CALLFINALLY_THUNKS EHblkDsc* ehDsc = ehGetDsc(finallyIndex); *begBlk = ehDsc->ebdTryBeg; *endBlk = ehDsc->ebdTryLast->bbNext; #endif // !FEATURE_EH_CALLFINALLY_THUNKS } #ifdef DEBUG bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex) { assert(blockCallFinally->bbJumpKind == BBJ_CALLFINALLY); assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(finallyIndex < compHndBBtabCount); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); bool inTryRegion; unsigned callFinallyIndex = ehGetCallFinallyRegionIndex(finallyIndex, &inTryRegion); if (callFinallyIndex == EHblkDsc::NO_ENCLOSING_INDEX) { if (blockCallFinally->hasTryIndex() || blockCallFinally->hasHndIndex()) { // The BBJ_CALLFINALLY is supposed to be in the main function body, not in any EH region. return false; } else { return true; } } else { if (inTryRegion) { if (bbInTryRegions(callFinallyIndex, blockCallFinally)) { return true; } } else { if (bbInHandlerRegions(callFinallyIndex, blockCallFinally)) { return true; } } } return false; } #endif // DEBUG #if defined(FEATURE_EH_FUNCLETS) /***************************************************************************** * * Are there (or will there be) any funclets in the function? */ bool Compiler::ehAnyFunclets() { return compHndBBtabCount > 0; // if there is any EH, there will be funclets } /***************************************************************************** * * Count the number of EH funclets in the function. 
This will return the number * there will be after funclets have been created, but because it runs over the * EH table, it is accurate at any time. */ unsigned Compiler::ehFuncletCount() { unsigned funcletCnt = 0; for (EHblkDsc* const HBtab : EHClauses(this)) { if (HBtab->HasFilter()) { ++funcletCnt; } ++funcletCnt; } return funcletCnt; } /***************************************************************************** * * Get the index to use as the cache key for sharing throw blocks. * For non-funclet platforms, this is just the block's bbTryIndex, to ensure * that throw is protected by the correct set of trys. However, when we have * funclets we also have to ensure that the throw blocks are *not* shared * across funclets, so we use EHblkDsc index of either the funclet or * the containing try region, whichever is inner-most. We differentiate * between the 3 cases by setting the high bits (0 = try, 1 = handler, * 2 = filter) * */ unsigned Compiler::bbThrowIndex(BasicBlock* blk) { if (!blk->hasTryIndex() && !blk->hasHndIndex()) { return -1; } const unsigned tryIndex = blk->hasTryIndex() ? blk->getTryIndex() : USHRT_MAX; const unsigned hndIndex = blk->hasHndIndex() ? blk->getHndIndex() : USHRT_MAX; assert(tryIndex != hndIndex); assert(tryIndex != USHRT_MAX || hndIndex != USHRT_MAX); if (tryIndex < hndIndex) { // The most enclosing region is a try body, use it assert(tryIndex <= 0x3FFFFFFF); return tryIndex; } // The most enclosing region is a handler which will be a funclet // Now we have to figure out if blk is in the filter or handler assert(hndIndex <= 0x3FFFFFFF); if (ehGetDsc(hndIndex)->InFilterRegionBBRange(blk)) { return hndIndex | 0x40000000; } return hndIndex | 0x80000000; } #endif // FEATURE_EH_FUNCLETS /***************************************************************************** * Determine the emitter code cookie for a block, for unwind purposes. */ void* Compiler::ehEmitCookie(BasicBlock* block) { noway_assert(block); void* cookie; #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) if (block->bbFlags & BBF_FINALLY_TARGET) { // Use the offset of the beginning of the NOP padding, not the main block. // This might include loop head padding, too, if this is a loop head. assert(block->bbUnwindNopEmitCookie); // probably not null-initialized, though, so this might not tell us // anything cookie = block->bbUnwindNopEmitCookie; } else #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) { cookie = block->bbEmitCookie; } noway_assert(cookie != nullptr); return cookie; } /***************************************************************************** * Determine the emitter code offset for a block. If the block is a finally * target, choose the offset of the NOP padding that precedes the block. 
*/ UNATIVE_OFFSET Compiler::ehCodeOffset(BasicBlock* block) { return GetEmitter()->emitCodeOffset(ehEmitCookie(block), 0); } /****************************************************************************/ EHblkDsc* Compiler::ehInitHndRange(BasicBlock* blk, IL_OFFSET* hndBeg, IL_OFFSET* hndEnd, bool* inFilter) { EHblkDsc* hndTab = ehGetBlockHndDsc(blk); if (hndTab != nullptr) { if (hndTab->InFilterRegionILRange(blk)) { *hndBeg = hndTab->ebdFilterBegOffs(); *hndEnd = hndTab->ebdFilterEndOffs(); *inFilter = true; } else { *hndBeg = hndTab->ebdHndBegOffs(); *hndEnd = hndTab->ebdHndEndOffs(); *inFilter = false; } } else { *hndBeg = 0; *hndEnd = info.compILCodeSize; *inFilter = false; } return hndTab; } /****************************************************************************/ EHblkDsc* Compiler::ehInitTryRange(BasicBlock* blk, IL_OFFSET* tryBeg, IL_OFFSET* tryEnd) { EHblkDsc* tryTab = ehGetBlockTryDsc(blk); if (tryTab != nullptr) { *tryBeg = tryTab->ebdTryBegOffs(); *tryEnd = tryTab->ebdTryEndOffs(); } else { *tryBeg = 0; *tryEnd = info.compILCodeSize; } return tryTab; } /****************************************************************************/ EHblkDsc* Compiler::ehInitHndBlockRange(BasicBlock* blk, BasicBlock** hndBeg, BasicBlock** hndLast, bool* inFilter) { EHblkDsc* hndTab = ehGetBlockHndDsc(blk); if (hndTab != nullptr) { if (hndTab->InFilterRegionBBRange(blk)) { *hndBeg = hndTab->ebdFilter; if (hndLast != nullptr) { *hndLast = hndTab->BBFilterLast(); } *inFilter = true; } else { *hndBeg = hndTab->ebdHndBeg; if (hndLast != nullptr) { *hndLast = hndTab->ebdHndLast; } *inFilter = false; } } else { *hndBeg = nullptr; if (hndLast != nullptr) { *hndLast = nullptr; } *inFilter = false; } return hndTab; } /****************************************************************************/ EHblkDsc* Compiler::ehInitTryBlockRange(BasicBlock* blk, BasicBlock** tryBeg, BasicBlock** tryLast) { EHblkDsc* tryTab = ehGetBlockTryDsc(blk); if (tryTab != nullptr) { *tryBeg = tryTab->ebdTryBeg; if (tryLast != nullptr) { *tryLast = tryTab->ebdTryLast; } } else { *tryBeg = nullptr; if (tryLast != nullptr) { *tryLast = nullptr; } } return tryTab; } /***************************************************************************** * This method updates the value of ebdTryBeg */ void Compiler::fgSetTryBeg(EHblkDsc* handlerTab, BasicBlock* newTryBeg) { assert(newTryBeg != nullptr); // Check if we are going to change the existing value of endTryLast // if (handlerTab->ebdTryBeg != newTryBeg) { // Update the EH table with the newTryLast block handlerTab->ebdTryBeg = newTryBeg; JITDUMP("EH#%u: New first block of try: " FMT_BB "\n", ehGetIndex(handlerTab), handlerTab->ebdTryBeg->bbNum); } } /***************************************************************************** * This method updates the value of ebdTryLast. */ void Compiler::fgSetTryEnd(EHblkDsc* handlerTab, BasicBlock* newTryLast) { assert(newTryLast != nullptr); // // Check if we are going to change the existing value of endTryLast // if (handlerTab->ebdTryLast != newTryLast) { // Update the EH table with the newTryLast block handlerTab->ebdTryLast = newTryLast; #ifdef DEBUG if (verbose) { printf("EH#%u: New last block of try: " FMT_BB "\n", ehGetIndex(handlerTab), newTryLast->bbNum); } #endif // DEBUG } } /***************************************************************************** * * This method updates the value of ebdHndLast. 
*/ void Compiler::fgSetHndEnd(EHblkDsc* handlerTab, BasicBlock* newHndLast) { assert(newHndLast != nullptr); // // Check if we are going to change the existing value of endHndLast // if (handlerTab->ebdHndLast != newHndLast) { // Update the EH table with the newHndLast block handlerTab->ebdHndLast = newHndLast; #ifdef DEBUG if (verbose) { printf("EH#%u: New last block of handler: " FMT_BB "\n", ehGetIndex(handlerTab), newHndLast->bbNum); } #endif // DEBUG } } /***************************************************************************** * * Given a EH handler table entry update the ebdTryLast and ebdHndLast pointers * to skip basic blocks that have been removed. They are set to the first * non-removed block after ebdTryBeg and ebdHndBeg, respectively. * * Note that removed blocks are not in the global list of blocks (no block in the * global list points to them). However, their pointers are still valid. We use * this fact when we walk lists of removed blocks until we find a non-removed * block, to be used for ending our iteration. */ void Compiler::fgSkipRmvdBlocks(EHblkDsc* handlerTab) { BasicBlock* block; BasicBlock* bEnd; BasicBlock* bLast; // Update ebdTryLast bLast = nullptr; // Find the first non-removed block after the 'try' region to end our iteration. bEnd = handlerTab->ebdTryLast->bbNext; while ((bEnd != nullptr) && (bEnd->bbFlags & BBF_REMOVED)) { bEnd = bEnd->bbNext; } // Update bLast to account for any removed blocks block = handlerTab->ebdTryBeg; while (block != nullptr) { if ((block->bbFlags & BBF_REMOVED) == 0) { bLast = block; } block = block->bbNext; if (block == bEnd) { break; } } fgSetTryEnd(handlerTab, bLast); // Update ebdHndLast bLast = nullptr; // Find the first non-removed block after the handler region to end our iteration. bEnd = handlerTab->ebdHndLast->bbNext; while ((bEnd != nullptr) && (bEnd->bbFlags & BBF_REMOVED)) { bEnd = bEnd->bbNext; } // Update bLast to account for any removed blocks block = handlerTab->ebdHndBeg; while (block != nullptr) { if ((block->bbFlags & BBF_REMOVED) == 0) { bLast = block; } block = block->bbNext; if (block == bEnd) { break; } } fgSetHndEnd(handlerTab, bLast); } /***************************************************************************** * * Allocate the EH table */ void Compiler::fgAllocEHTable() { #if defined(FEATURE_EH_FUNCLETS) // We need to allocate space for EH clauses that will be used by funclets // as well as one for each EH clause from the IL. Nested EH clauses pulled // out as funclets create one EH clause for each enclosing region. Thus, // the maximum number of clauses we will need might be very large. We allocate // twice the number of EH clauses in the IL, which should be good in practice. // In extreme cases, we might need to abandon this and reallocate. See // fgAddEHTableEntry() for more details. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG compHndBBtabAllocCount = info.compXcptnsCount; // force the resizing code to hit more frequently in DEBUG #else // DEBUG compHndBBtabAllocCount = info.compXcptnsCount * 2; #endif // DEBUG #else // !FEATURE_EH_FUNCLETS compHndBBtabAllocCount = info.compXcptnsCount; #endif // !FEATURE_EH_FUNCLETS compHndBBtab = new (this, CMK_BasicBlock) EHblkDsc[compHndBBtabAllocCount]; compHndBBtabCount = info.compXcptnsCount; } /***************************************************************************** * * Remove a single exception table entry. Note that this changes the size of * the exception table. 
If calling this within a loop over the exception table * be careful to iterate again on the current entry (if XTnum) to not skip any. */ void Compiler::fgRemoveEHTableEntry(unsigned XTnum) { assert(compHndBBtabCount > 0); assert(XTnum < compHndBBtabCount); EHblkDsc* HBtab; /* Reduce the number of entries in the EH table by one */ compHndBBtabCount--; if (compHndBBtabCount == 0) { // No more entries remaining. // // We used to null out compHndBBtab here, but with OSR + Synch method // we may remove all the initial EH entries if not reachable in the // OSR portion, then need to add one for the synchronous exit. // // So now we just leave it be. } else { /* If we recorded an enclosing index for xtab then see * if it needs to be updated due to the removal of this entry */ HBtab = compHndBBtab + XTnum; for (EHblkDsc* const xtab : EHClauses(this)) { if ((xtab != HBtab) && (xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) && (xtab->ebdEnclosingTryIndex >= XTnum)) { // Update the enclosing scope link if (xtab->ebdEnclosingTryIndex == XTnum) { xtab->ebdEnclosingTryIndex = HBtab->ebdEnclosingTryIndex; } if ((xtab->ebdEnclosingTryIndex > XTnum) && (xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX)) { xtab->ebdEnclosingTryIndex--; } } if ((xtab != HBtab) && (xtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) && (xtab->ebdEnclosingHndIndex >= XTnum)) { // Update the enclosing scope link if (xtab->ebdEnclosingHndIndex == XTnum) { xtab->ebdEnclosingHndIndex = HBtab->ebdEnclosingHndIndex; } if ((xtab->ebdEnclosingHndIndex > XTnum) && (xtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX)) { xtab->ebdEnclosingHndIndex--; } } } /* We need to update all of the blocks' bbTryIndex */ for (BasicBlock* const blk : Blocks()) { if (blk->hasTryIndex()) { if (blk->getTryIndex() == XTnum) { noway_assert(blk->bbFlags & BBF_REMOVED); INDEBUG(blk->setTryIndex(MAX_XCPTN_INDEX);) // Note: this is still a legal index, just unlikely } else if (blk->getTryIndex() > XTnum) { blk->setTryIndex(blk->getTryIndex() - 1); } } if (blk->hasHndIndex()) { if (blk->getHndIndex() == XTnum) { noway_assert(blk->bbFlags & BBF_REMOVED); INDEBUG(blk->setHndIndex(MAX_XCPTN_INDEX);) // Note: this is still a legal index, just unlikely } else if (blk->getHndIndex() > XTnum) { blk->setHndIndex(blk->getHndIndex() - 1); } } } /* Now remove the unused entry from the table */ if (XTnum < compHndBBtabCount) { /* We copy over the old entry */ memmove(HBtab, HBtab + 1, (compHndBBtabCount - XTnum) * sizeof(*HBtab)); } else { /* Last entry. Don't need to do anything */ noway_assert(XTnum == compHndBBtabCount); } } } #if defined(FEATURE_EH_FUNCLETS) /***************************************************************************** * * Add a single exception table entry at index 'XTnum', [0 <= XTnum <= compHndBBtabCount]. * If 'XTnum' is compHndBBtabCount, then add the entry at the end. * Note that this changes the size of the exception table. * All the blocks referring to the various index values are updated. * The table entry itself is not filled in. * Returns a pointer to the new entry. 
*/ EHblkDsc* Compiler::fgAddEHTableEntry(unsigned XTnum) { if (XTnum != compHndBBtabCount) { // Update all enclosing links that will get invalidated by inserting an entry at 'XTnum' for (EHblkDsc* const xtab : EHClauses(this)) { if ((xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) && (xtab->ebdEnclosingTryIndex >= XTnum)) { // Update the enclosing scope link xtab->ebdEnclosingTryIndex++; } if ((xtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) && (xtab->ebdEnclosingHndIndex >= XTnum)) { // Update the enclosing scope link xtab->ebdEnclosingHndIndex++; } } // We need to update the BasicBlock bbTryIndex and bbHndIndex field for all blocks for (BasicBlock* const blk : Blocks()) { if (blk->hasTryIndex() && (blk->getTryIndex() >= XTnum)) { blk->setTryIndex(blk->getTryIndex() + 1); } if (blk->hasHndIndex() && (blk->getHndIndex() >= XTnum)) { blk->setHndIndex(blk->getHndIndex() + 1); } } } // Increase the number of entries in the EH table by one if (compHndBBtabCount == compHndBBtabAllocCount) { // We need to reallocate the table if (compHndBBtabAllocCount == MAX_XCPTN_INDEX) { // We're already at the max size for indices to be unsigned short IMPL_LIMITATION("too many exception clauses"); } // Double the table size. For stress, we could use +1. Note that if the table isn't allocated // yet, such as when we add an EH region for synchronized methods that don't already have one, // we start at zero, so we need to make sure the new table has at least one entry. unsigned newHndBBtabAllocCount = max(1, compHndBBtabAllocCount * 2); noway_assert(compHndBBtabAllocCount < newHndBBtabAllocCount); // check for overflow if (newHndBBtabAllocCount > MAX_XCPTN_INDEX) { newHndBBtabAllocCount = MAX_XCPTN_INDEX; // increase to the maximum size we allow } JITDUMP("*********** fgAddEHTableEntry: increasing EH table size from %d to %d\n", compHndBBtabAllocCount, newHndBBtabAllocCount); compHndBBtabAllocCount = newHndBBtabAllocCount; EHblkDsc* newTable = new (this, CMK_BasicBlock) EHblkDsc[compHndBBtabAllocCount]; // Move over the stuff before the new entry memcpy_s(newTable, compHndBBtabAllocCount * sizeof(*compHndBBtab), compHndBBtab, XTnum * sizeof(*compHndBBtab)); if (XTnum != compHndBBtabCount) { // Move over the stuff after the new entry memcpy_s(newTable + XTnum + 1, (compHndBBtabAllocCount - XTnum - 1) * sizeof(*compHndBBtab), compHndBBtab + XTnum, (compHndBBtabCount - XTnum) * sizeof(*compHndBBtab)); } // Now set the new table as the table to use. The old one gets lost, but we can't // free it because we don't have a freeing allocator. compHndBBtab = newTable; } else if (XTnum != compHndBBtabCount) { // Leave the elements before the new element alone. Move the ones after it, to make space. EHblkDsc* HBtab = compHndBBtab + XTnum; memmove_s(HBtab + 1, (compHndBBtabAllocCount - XTnum - 1) * sizeof(*compHndBBtab), HBtab, (compHndBBtabCount - XTnum) * sizeof(*compHndBBtab)); } // Now the entry is there, but not filled in compHndBBtabCount++; return compHndBBtab + XTnum; } #endif // FEATURE_EH_FUNCLETS #if !FEATURE_EH /***************************************************************************** * fgRemoveEH: To facilitate the bring-up of new platforms without having to * worry about fully implementing EH, we want to simply remove EH constructs * from the IR. This works because a large percentage of our tests contain * EH constructs but don't actually throw exceptions. This function removes * 'catch', 'filter', 'filter-handler', and 'fault' clauses completely. 
* It requires that the importer has created the EH table, and that normal * EH well-formedness tests have been done, and 'leave' opcodes have been * imported. * * It currently does not handle 'finally' clauses, so tests that include * 'finally' will NYI(). To handle 'finally', we would need to inline the * 'finally' clause IL at each exit from a finally-protected 'try', or * else call the 'finally' clause, like normal. * * Walk the EH table from beginning to end. If a table entry is nested within * a handler, we skip it, as we'll delete its code when we get to the enclosing * handler. If a clause is enclosed within a 'try', or has no nesting, then we delete * it (and its range of code blocks). We don't need to worry about cleaning up * the EH table entries as we remove the individual handlers (such as calling * fgRemoveEHTableEntry()), as we'll null out the entire table at the end. * * This function assumes FEATURE_EH_FUNCLETS is defined. */ void Compiler::fgRemoveEH() { #ifdef DEBUG if (verbose) printf("\n*************** In fgRemoveEH()\n"); #endif // DEBUG if (compHndBBtabCount == 0) { JITDUMP("No EH to remove\n\n"); return; } #ifdef DEBUG if (verbose) { printf("\n*************** Before fgRemoveEH()\n"); fgDispBasicBlocks(); fgDispHandlerTab(); printf("\n"); } #endif // DEBUG // Make sure we're early in compilation, so we don't need to update lots of data structures. assert(!fgComputePredsDone); assert(!fgDomsComputed); assert(!fgFuncletsCreated); assert(fgFirstFuncletBB == nullptr); // this should follow from "!fgFuncletsCreated" assert(!optLoopsMarked); unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { if (HBtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) { // This entry is nested within some other handler. So, don't delete the // EH entry here; let the enclosing handler delete it. Note that for this // EH entry, both the 'try' and handler portions are fully nested within // the enclosing handler region, due to proper nesting rules. continue; } if (HBtab->HasCatchHandler() || HBtab->HasFilter() || HBtab->HasFaultHandler()) { // Remove all the blocks associated with the handler. Note that there is no // fall-through into the handler, or fall-through out of the handler, so // just deleting the blocks is sufficient. Note, however, that for every // BBJ_EHCATCHRET we delete, we need to fix up the reference count of the // block it points to (by subtracting one from its reference count). // Note that the blocks for a filter immediately preceed the blocks for its associated filter-handler. BasicBlock* blkBeg = HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg; BasicBlock* blkLast = HBtab->ebdHndLast; // Splice out the range of blocks from blkBeg to blkLast (inclusive). fgUnlinkRange(blkBeg, blkLast); BasicBlock* blk; // Walk the unlinked blocks and marked them as having been removed. for (blk = blkBeg; blk != blkLast->bbNext; blk = blk->bbNext) { blk->bbFlags |= BBF_REMOVED; if (blk->bbJumpKind == BBJ_EHCATCHRET) { assert(blk->bbJumpDest->bbRefs > 0); blk->bbJumpDest->bbRefs -= 1; } } // Walk the blocks of the 'try' and clear data that makes them appear to be within a 'try'. for (blk = HBtab->ebdTryBeg; blk != HBtab->ebdTryLast->bbNext; blk = blk->bbNext) { blk->clearTryIndex(); blk->bbFlags &= ~BBF_TRY_BEG; } // If we are deleting a range of blocks whose last block is // the 'last' block of an enclosing try/hnd region, we need to // fix up the EH table. 
We only care about less nested // EH table entries, since we've already deleted everything up to XTnum. unsigned XTnum2; EHblkDsc* HBtab2; for (XTnum2 = XTnum + 1, HBtab2 = compHndBBtab + XTnum2; XTnum2 < compHndBBtabCount; XTnum2++, HBtab2++) { // Handle case where deleted range is at the end of a 'try'. if (HBtab2->ebdTryLast == blkLast) { fgSetTryEnd(HBtab2, blkBeg->bbPrev); } // Handle case where deleted range is at the end of a handler. // (This shouldn't happen, though, because we don't delete handlers // nested within other handlers; we wait until we get to the // enclosing handler.) if (HBtab2->ebdHndLast == blkLast) { unreached(); } } } else { // It must be a 'finally'. We still need to call the finally. Note that the // 'finally' can be "called" from multiple locations (e.g., the 'try' block // can have multiple 'leave' instructions, each leaving to different targets, // and each going through the 'finally'). We could inline the 'finally' at each // LEAVE site within a 'try'. If the 'try' exits at all (that is, no infinite loop), // there will be at least one since there is no "fall through" at the end of // the 'try'. assert(HBtab->HasFinallyHandler()); NYI("remove finally blocks"); } } /* end of the for loop over XTnum */ #ifdef DEBUG // Make sure none of the remaining blocks have any EH. for (BasicBlock* const blk : Blocks()) { assert(!blk->hasTryIndex()); assert(!blk->hasHndIndex()); assert((blk->bbFlags & BBF_TRY_BEG) == 0); assert((blk->bbFlags & BBF_FUNCLET_BEG) == 0); assert((blk->bbFlags & BBF_REMOVED) == 0); assert(blk->bbCatchTyp == BBCT_NONE); } #endif // DEBUG // Delete the EH table compHndBBtab = nullptr; compHndBBtabCount = 0; // Leave compHndBBtabAllocCount alone. // Renumber the basic blocks JITDUMP("\nRenumbering the basic blocks for fgRemoveEH\n"); fgRenumberBlocks(); #ifdef DEBUG if (verbose) { printf("\n*************** After fgRemoveEH()\n"); fgDispBasicBlocks(); fgDispHandlerTab(); printf("\n"); } #endif } #endif // !FEATURE_EH /***************************************************************************** * * Sort the EH table if necessary. */ void Compiler::fgSortEHTable() { if (!fgNeedToSortEHTable) { return; } // Now, all fields of the EH table are set except for those that are related // to nesting. We need to first sort the table to ensure that an EH clause // appears before any try or handler that it is nested within. The CLI spec // requires this for nesting in 'try' clauses, but does not require this // for handler clauses. However, parts of the JIT do assume this ordering. // // For example: // // try { // A // } catch { // try { // B // } catch { // } // } // // In this case, the EH clauses for A and B have no required ordering: the // clause for either A or B can come first, despite B being nested within // the catch clause for A. // // The CLI spec, section 12.4.2.5 "Overview of exception handling", states: // "The ordering of the exception clauses in the Exception Handler Table is // important. If handlers are nested, the most deeply nested try blocks shall // come before the try blocks that enclose them." // // Note, in particular, that it doesn't say "shall come before the *handler* // blocks that enclose them". // // Also, the same section states, "When an exception occurs, the CLI searches // the array for the first protected block that (1) Protects a region including the // current instruction pointer and (2) Is a catch handler block and (3) Whose // filter wishes to handle the exception." 
// // Once again, nothing about the ordering of the catch blocks. // // A more complicated example: // // try { // A // } catch { // try { // B // try { // C // } catch { // } // } catch { // } // } // // The clause for C must come before the clause for B, but the clause for A can // be anywhere. Thus, we could have these orderings: ACB, CAB, CBA. // // One more example: // // try { // A // } catch { // try { // B // } catch { // try { // C // } catch { // } // } // } // // There is no ordering requirement: the EH clauses can come in any order. // // In Dev11 (Visual Studio 2012), x86 did not sort the EH table (it never had before) // but ARM did. It turns out not sorting the table can cause the EH table to incorrectly // set the bbHndIndex value in some nested cases, and that can lead to a security exploit // that allows the execution of arbitrary code. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("fgSortEHTable: Sorting EH table\n"); } #endif // DEBUG EHblkDsc* xtab1; EHblkDsc* xtab2; unsigned xtabnum1, xtabnum2; for (xtabnum1 = 0, xtab1 = compHndBBtab; xtabnum1 < compHndBBtabCount; xtabnum1++, xtab1++) { for (xtabnum2 = xtabnum1 + 1, xtab2 = xtab1 + 1; xtabnum2 < compHndBBtabCount; xtabnum2++, xtab2++) { // If the nesting is wrong, swap them. The nesting is wrong if // EH region 2 is nested in the try, handler, or filter of EH region 1. // Note that due to proper nesting rules, if any of 2 is nested in // the try or handler or filter of 1, then all of 2 is nested. // We must be careful when comparing the offsets of the 'try' clause, because // for "mutually-protect" try/catch, the 'try' bodies will be identical. // For this reason, we use the handler region to check nesting. Note // that we must check both beginning and end: a nested region can have a 'try' // body that starts at the beginning of a handler. Thus, if we just compared the // handler begin offset, we might get confused and think it is nested. IL_OFFSET hndBegOff = xtab2->ebdHndBegOffset; IL_OFFSET hndEndOff = xtab2->ebdHndEndOffset; assert(hndEndOff > hndBegOff); if ((hndBegOff >= xtab1->ebdTryBegOffset && hndEndOff <= xtab1->ebdTryEndOffset) || (hndBegOff >= xtab1->ebdHndBegOffset && hndEndOff <= xtab1->ebdHndEndOffset) || (xtab1->HasFilter() && (hndBegOff >= xtab1->ebdFilterBegOffset && hndEndOff <= xtab1->ebdHndBegOffset)) // Note that end of filter is beginning of handler ) { #ifdef DEBUG if (verbose) { printf("fgSortEHTable: Swapping out-of-order EH#%u and EH#%u\n", xtabnum1, xtabnum2); } // Assert that the 'try' region is also nested in the same place as the handler IL_OFFSET tryBegOff = xtab2->ebdTryBegOffset; IL_OFFSET tryEndOff = xtab2->ebdTryEndOffset; assert(tryEndOff > tryBegOff); if (hndBegOff >= xtab1->ebdTryBegOffset && hndEndOff <= xtab1->ebdTryEndOffset) { assert(tryBegOff >= xtab1->ebdTryBegOffset && tryEndOff <= xtab1->ebdTryEndOffset); } if (hndBegOff >= xtab1->ebdHndBegOffset && hndEndOff <= xtab1->ebdHndEndOffset) { assert(tryBegOff >= xtab1->ebdHndBegOffset && tryEndOff <= xtab1->ebdHndEndOffset); } if (xtab1->HasFilter() && (hndBegOff >= xtab1->ebdFilterBegOffset && hndEndOff <= xtab1->ebdHndBegOffset)) { assert(tryBegOff >= xtab1->ebdFilterBegOffset && tryEndOff <= xtab1->ebdHndBegOffset); } #endif // DEBUG // Swap them! EHblkDsc tmp = *xtab1; *xtab1 = *xtab2; *xtab2 = tmp; } } } } // fgNormalizeEH: Enforce the following invariants: // // 1. No block is both the first block of a handler and the first block of a try. 
In IL (and on entry // to this function), this can happen if the "try" is more nested than the handler. // // For example, consider: // // try1 ----------------- BB01 // | BB02 // |--------------------- BB03 // handler1 // |----- try2 ---------- BB04 // | | BB05 // | handler2 ------ BB06 // | | BB07 // | --------------- BB08 // |--------------------- BB09 // // Thus, the start of handler1 and the start of try2 are the same block. We will transform this to: // // try1 ----------------- BB01 // | BB02 // |--------------------- BB03 // handler1 ------------- BB10 // empty block // | try2 ---------- BB04 // | | BB05 // | handler2 ------ BB06 // | | BB07 // | --------------- BB08 // |--------------------- BB09 // // 2. No block is the first block of more than one try or handler region. // (Note that filters cannot have EH constructs nested within them, so there can be no nested try or // handler that shares the filter begin or last block. For try/filter/filter-handler constructs nested // within a try or handler region, note that the filter block cannot be the first block of the try, // nor can it be the first block of the handler, since you can't "fall into" a filter, which that situation // would require.) // // For example, we will transform this: // // try3 try2 try1 // |--- |--- |--- BB01 // | | | BB02 // | | |--- BB03 // | | BB04 // | |------------ BB05 // | BB06 // |------------------- BB07 // // to this: // // try3 ------------- BB08 // empty BBJ_NONE block // | try2 ------ BB09 // empty BBJ_NONE block // | | try1 // | | |--- BB01 // | | | BB02 // | | |--- BB03 // | | BB04 // | |------------ BB05 // | BB06 // |------------------- BB07 // // The benefit of this is that adding a block to an EH region will not require examining every EH region, // looking for possible shared "first" blocks to adjust. It also makes it easier to put code at the top // of a particular EH region, especially for loop optimizations. // // These empty blocks (BB08, BB09) will generate no code (unless some code is subsequently placed into them), // and will have the same native code offset as BB01 after code is generated. There may be labels generated // for them, if they are branch targets, so it is possible to have multiple labels targeting the same native // code offset. The blocks will not be merged with the blocks they are split from, because they will have a // different EH region, and we don't merge blocks from two different EH regions. // // In the example, if there are branches to BB01, we need to distribute them to BB01, BB08, or BB09, appropriately. // 1. A branch from BB01/BB02/BB03 to BB01 will still go to BB01. Branching to BB09 or BB08 would not be legal, // since it would branch out of a try region. // 2. A branch from BB04/BB05 to BB01 will instead branch to BB09. Branching to BB08 would not be legal. Note // that branching to BB01 would still be legal, so we have a choice. It makes the most sense to branch to BB09, // so the source and target of a branch are in the same EH region. // 3. Similarly, a branch from BB06/BB07 to BB01 will go to BB08, even though branching to BB09 would be legal. // 4. A branch from outside this loop (at the top-level) to BB01 will go to BB08. This is one case where the // source and target of the branch are not in the same EH region. // // The EH nesting rules for IL branches are described in the ECMA spec section 12.4.2.8.2.7 "Branches" and // section 12.4.2.8.2.9 "Examples". 
// // There is one exception to this normalization rule: we do not change "mutually protect" regions. These are cases // where two EH table entries have exactly the same 'try' region, used to implement C# "try / catch / catch". // The first handler appears by our nesting to be an "inner" handler, with ebdEnclosingTryIndex pointing to the // second one. It is not true nesting, though, since they both protect the same "try". Both the these EH table // entries must keep the same "try" region begin/last block pointers. A block in this "try" region has a try index // of the first ("most nested") EH table entry. // // 3. No block is the last block of more than one try or handler region. Again, as described above, // filters need not be considered. // // For example, we will transform this: // // try3 ----------------- BB01 // | try2 ---------- BB02 // | | handler1 BB03 // | | | BB04 // |----- |----- |------- BB05 // // (where all three try regions end at BB05) to this: // // try3 ----------------- BB01 // | try2 ---------- BB02 // | | handler1 BB03 // | | | BB04 // | | |------- BB05 // | |-------------- BB06 // empty BBJ_NONE block // |--------------------- BB07 // empty BBJ_NONE block // // No branches need to change: if something branched to BB05, it will still branch to BB05. If BB05 is a // BBJ_NONE block, then control flow will fall through the newly added blocks as well. If it is anything // else, it will retain that block branch type and BB06 and BB07 will be unreachable. // // The benefit of this is, once again, to remove the need to consider every EH region when adding new blocks. // // Overall, a block can appear in the EH table exactly once: as the begin or last block of a single try, filter, or // handler. There is one exception: for a single-block EH region, the block can appear as both the "begin" and "last" // block of the try, or the "begin" and "last" block of the handler (note that filters don't have a "last" block stored, // so this case doesn't apply.) // (Note: we could remove this special case if we wanted, and if it helps anything, but it doesn't appear that it will // help.) // // These invariants simplify a number of things. When inserting a new block into a region, it is not necessary to // traverse the entire EH table looking to see if any EH region needs to be updated. You only ever need to update a // single region (except for mutually-protect "try" regions). // // Also, for example, when we're trying to determine the successors of a block B1 that leads into a try T1, if a block // B2 violates invariant #3 by being the first block of both the handler of T1, and an enclosed try T2, inserting a // block to enforce this invariant prevents us from having to consider the first block of T2's handler as a possible // successor of B1. This is somewhat akin to breaking of "critical edges" in a flowgraph. void Compiler::fgNormalizeEH() { if (compHndBBtabCount == 0) { // No EH? Nothing to do. INDEBUG(fgNormalizeEHDone = true;) return; } #ifdef DEBUG if (verbose) { printf("*************** In fgNormalizeEH()\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif bool modified = false; // Case #1: Prevent the first block of a handler from also being the first block of a 'try'. if (fgNormalizeEHCase1()) { modified = true; } // Case #2: Prevent any two EH regions from starting with the same block (after case #3, we only need to worry about // 'try' blocks). if (fgNormalizeEHCase2()) { modified = true; } #if 0 // Case 3 normalization is disabled. 
The JIT really doesn't like having extra empty blocks around, especially // blocks that are unreachable. There are lots of asserts when such things occur. We will re-evaluate whether we // can do this normalization. // Note: there are cases in fgVerifyHandlerTab() that are also disabled to match this. // Case #3: Prevent any two EH regions from ending with the same block. if (fgNormalizeEHCase3()) { modified = true; } #endif // 0 INDEBUG(fgNormalizeEHDone = true;) if (modified) { // If we computed the cheap preds, don't let them leak out, in case other code doesn't maintain them properly. if (fgCheapPredsValid) { fgRemovePreds(); } JITDUMP("Added at least one basic block in fgNormalizeEH.\n"); fgRenumberBlocks(); #ifdef DEBUG // fgRenumberBlocks() will dump all the blocks and the handler table, so we don't need to do it here. fgVerifyHandlerTab(); #endif } else { JITDUMP("No EH normalization performed.\n"); } } bool Compiler::fgNormalizeEHCase1() { bool modified = false; // // Case #1: Is the first block of a handler also the first block of any try? // // Do this as a separate loop from case #2 to simplify the logic for cases where we have both multiple identical // 'try' begin blocks as well as this case, e.g.: // try { // } finally { try { try { // } catch {} // } catch {} // } // where the finally/try/try are all the same block. // We also do this before case #2, so when we get to case #2, we only need to worry about updating 'try' begin // blocks (and only those within the 'try' region's parents), not handler begin blocks, when we are inserting new // header blocks. // for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++) { EHblkDsc* eh = ehGetDsc(XTnum); BasicBlock* handlerStart = eh->ebdHndBeg; EHblkDsc* handlerStartContainingTry = ehGetBlockTryDsc(handlerStart); // If the handler start block is in a try, and is in fact the first block of that try... if (handlerStartContainingTry != nullptr && handlerStartContainingTry->ebdTryBeg == handlerStart) { // ...then we want to insert an empty, non-removable block outside the try to be the new first block of the // handler. BasicBlock* newHndStart = bbNewBasicBlock(BBJ_NONE); fgInsertBBbefore(eh->ebdHndBeg, newHndStart); #ifdef DEBUG if (verbose) { printf("Handler begin for EH#%02u and 'try' begin for EH%02u are the same block; inserted new " FMT_BB " " "before " FMT_BB " as new handler begin for EH#%u.\n", XTnum, ehGetIndex(handlerStartContainingTry), newHndStart->bbNum, eh->ebdHndBeg->bbNum, XTnum); } #endif // DEBUG // The new block is the new handler begin. eh->ebdHndBeg = newHndStart; // Try index is the same as the enclosing try, if any, of eh: if (eh->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { newHndStart->clearTryIndex(); } else { newHndStart->setTryIndex(eh->ebdEnclosingTryIndex); } newHndStart->setHndIndex(XTnum); newHndStart->bbCatchTyp = handlerStart->bbCatchTyp; handlerStart->bbCatchTyp = BBCT_NONE; // Now handlerStart is no longer the start of a handler... newHndStart->bbCodeOffs = handlerStart->bbCodeOffs; newHndStart->bbCodeOffsEnd = newHndStart->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead? 
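            // The new empty block inherits the old handler entry's weight and is marked internal and non-removable,
            // since it now anchors the start of the handler region.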
newHndStart->inheritWeight(handlerStart); newHndStart->bbFlags |= (BBF_DONT_REMOVE | BBF_INTERNAL); modified = true; #ifdef DEBUG if (0 && verbose) // Normally this is way too verbose, but it is useful for debugging { printf("*************** fgNormalizeEH() made a change\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } } return modified; } bool Compiler::fgNormalizeEHCase2() { bool modified = false; // // Case #2: Make sure no two 'try' have the same begin block (except for mutually-protect regions). // Note that this can only happen for nested 'try' regions, so we only need to look through the // 'try' nesting hierarchy. // for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++) { EHblkDsc* eh = ehGetDsc(XTnum); if (eh->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) { BasicBlock* tryStart = eh->ebdTryBeg; BasicBlock* insertBeforeBlk = tryStart; // If we need to insert new blocks, we insert before this block. // We need to keep track of the last "mutually protect" region so we can properly not add additional header // blocks to the second and subsequent mutually protect try blocks. We can't just keep track of the EH // region pointer, because we're updating the 'try' begin blocks as we go. So, we need to keep track of the // pre-update 'try' begin/last blocks themselves. BasicBlock* mutualTryBeg = eh->ebdTryBeg; BasicBlock* mutualTryLast = eh->ebdTryLast; unsigned mutualProtectIndex = XTnum; EHblkDsc* ehOuter = eh; do { unsigned ehOuterTryIndex = ehOuter->ebdEnclosingTryIndex; ehOuter = ehGetDsc(ehOuterTryIndex); BasicBlock* outerTryStart = ehOuter->ebdTryBeg; if (outerTryStart == tryStart) { // We found two EH regions with the same 'try' begin! Should we do something about it? if (ehOuter->ebdIsSameTry(mutualTryBeg, mutualTryLast)) { // clang-format off // Don't touch mutually-protect regions: their 'try' regions must remain identical! // We want to continue the looping outwards, in case we have something like this: // // try3 try2 try1 // |--- |---- |---- BB01 // | | | BB02 // | |---- |---- BB03 // | BB04 // |------------------- BB05 // // (Thus, try1 & try2 are mutually-protect 'try' regions from BB01 to BB03. They are nested inside try3, // which also starts at BB01. The 'catch' clauses have been elided.) // In this case, we'll decline to add a new header block for try2, but we will add a new one for try3, ending with: // // try3 try2 try1 // |------------------- BB06 // | |---- |---- BB01 // | | | BB02 // | |---- |---- BB03 // | BB04 // |------------------- BB05 // // More complicated (yes, this is real): // // try { // try { // try { // try { // try { // try { // try { // try { // } // catch {} // mutually-protect set #1 // catch {} // } finally {} // } // catch {} // mutually-protect set #2 // catch {} // catch {} // } finally {} // } catch {} // } finally {} // } catch {} // } finally {} // // In this case, all the 'try' start at the same block! Note that there are two sets of mutually-protect regions, // separated by some nesting. // clang-format on #ifdef DEBUG if (verbose) { printf("Mutually protect regions EH#%u and EH#%u; leaving identical 'try' begin blocks.\n", mutualProtectIndex, ehGetIndex(ehOuter)); } #endif // DEBUG // We still need to update the tryBeg, if something more nested already did that. ehOuter->ebdTryBeg = insertBeforeBlk; } else { // We're in a new set of mutual protect regions, so don't compare against the original. 
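                        // Record this outer region's original 'try' extent; later iterations compare against it to
                        // recognize any further-enclosing region that is mutually protect with this one.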
mutualTryBeg = ehOuter->ebdTryBeg; mutualTryLast = ehOuter->ebdTryLast; mutualProtectIndex = ehOuterTryIndex; // We're going to need the preds. We compute them here, before inserting the new block, // so our logic to add/remove preds below is the same for both the first time preds are // created and subsequent times. if (!fgCheapPredsValid) { fgComputeCheapPreds(); } // We've got multiple 'try' blocks starting at the same place! // Add a new first 'try' block for 'ehOuter' that will be outside 'eh'. BasicBlock* newTryStart = bbNewBasicBlock(BBJ_NONE); fgInsertBBbefore(insertBeforeBlk, newTryStart); insertBeforeBlk->bbRefs++; #ifdef DEBUG if (verbose) { printf("'try' begin for EH#%u and EH#%u are same block; inserted new " FMT_BB " before " FMT_BB " " "as new 'try' begin for EH#%u.\n", ehOuterTryIndex, XTnum, newTryStart->bbNum, insertBeforeBlk->bbNum, ehOuterTryIndex); } #endif // DEBUG // The new block is the new 'try' begin. ehOuter->ebdTryBeg = newTryStart; newTryStart->copyEHRegion(tryStart); // Copy the EH region info newTryStart->setTryIndex(ehOuterTryIndex); // ... but overwrite the 'try' index newTryStart->bbCatchTyp = BBCT_NONE; newTryStart->bbCodeOffs = tryStart->bbCodeOffs; newTryStart->bbCodeOffsEnd = newTryStart->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead? newTryStart->inheritWeight(tryStart); // Note that we don't need to clear any flags on the old try start, since it is still a 'try' // start. newTryStart->bbFlags |= (BBF_TRY_BEG | BBF_DONT_REMOVE | BBF_INTERNAL); if (insertBeforeBlk->bbFlags & BBF_BACKWARD_JUMP_TARGET) { newTryStart->bbFlags |= BBF_BACKWARD_JUMP_TARGET; } // Now we need to split any flow edges targetting the old try begin block between the old // and new block. Note that if we are handling a multiply-nested 'try', we may have already // split the inner set. So we need to split again, from the most enclosing block that we've // already created, namely, insertBeforeBlk. // // For example: // // try3 try2 try1 // |---- |---- |---- BB01 // | | | BB02 // | | |---- BB03 // | |----------- BB04 // |------------------ BB05 // // We'll loop twice, to create two header blocks, one for try2, and the second time for try3 // (in that order). // After the first loop, we have: // // try3 try2 try1 // |---- BB06 // |---- | |---- BB01 // | | | BB02 // | | |---- BB03 // | |----------- BB04 // |------------------ BB05 // // And all the external edges have been changed to point at try2. On the next loop, we'll create // a unique header block for try3, and split the edges between try2 and try3, leaving us with: // // try3 try2 try1 // |---- BB07 // | |---- BB06 // | | |---- BB01 // | | | BB02 // | | |---- BB03 // | |----------- BB04 // |------------------ BB05 BasicBlockList* nextPred; // we're going to update the pred list as we go, so we need to keep // track of the next pred in case it gets deleted. for (BasicBlockList* pred = insertBeforeBlk->bbCheapPreds; pred != nullptr; pred = nextPred) { nextPred = pred->next; // Who gets this predecessor? BasicBlock* predBlock = pred->block; if (!BasicBlock::sameTryRegion(insertBeforeBlk, predBlock)) { // Move the edge to target newTryStart instead of insertBeforeBlk. fgAddCheapPred(newTryStart, predBlock); fgRemoveCheapPred(insertBeforeBlk, predBlock); // Now change the branch. If it was a BBJ_NONE fall-through to the top block, this will // do nothing. Since cheap preds contains dups (for switch duplicates), we will call // this once per dup. 
fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk); // Need to adjust ref counts here since we're retargeting edges. newTryStart->bbRefs++; assert(insertBeforeBlk->countOfInEdges() > 0); insertBeforeBlk->bbRefs--; #ifdef DEBUG if (verbose) { printf("Redirect " FMT_BB " target from " FMT_BB " to " FMT_BB ".\n", predBlock->bbNum, insertBeforeBlk->bbNum, newTryStart->bbNum); } #endif // DEBUG } } // The new block (a fall-through block) is a new predecessor. fgAddCheapPred(insertBeforeBlk, newTryStart); // We don't need to update the tryBeg block of other EH regions here because we are looping // outwards in enclosing try index order, and we'll get to them later. // Move the insert block backwards, to the one we just inserted. insertBeforeBlk = insertBeforeBlk->bbPrev; assert(insertBeforeBlk == newTryStart); modified = true; #ifdef DEBUG if (0 && verbose) // Normally this is way too verbose, but it is useful for debugging { printf("*************** fgNormalizeEH() made a change\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } } else { // If the 'try' start block in the outer block isn't the same, then none of the more-enclosing // try regions (if any) can have the same 'try' start block, so we're done. // Note that we could have a situation like this: // // try4 try3 try2 try1 // |--- |--- | | BB01 // | | | | BB02 // | | |---- |---- BB03 // | | | BB04 // | | |------------ BB05 // | | BB06 // | |------------------- BB07 // |-------------------------- BB08 // // (Thus, try1 & try2 start at BB03, and are nested inside try3 & try4, which both start at BB01.) // In this case, we'll process try1 and try2, then break out. Later, we'll get to try3 and process // it and try4. break; } } while (ehOuter->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX); } } return modified; } bool Compiler::fgNormalizeEHCase3() { bool modified = false; // // Case #3: Make sure no two 'try' or handler regions have the same 'last' block (except for mutually protect 'try' // regions). As above, there has to be EH region nesting for this to occur. However, since we need to consider // handlers, there are more cases. // // There are four cases to consider: // (1) try nested in try // (2) handler nested in try // (3) try nested in handler // (4) handler nested in handler // // Note that, before funclet generation, it would be unusual, though legal IL, for a 'try' to come at the end // of an EH region (either 'try' or handler region), since that implies that its corresponding handler precedes it. // That will never happen in C#, but is legal in IL. // // Only one of these cases can happen. For example, if we have case (2), where a try/catch is nested in a 'try' and // the nested handler has the same 'last' block as the outer handler, then, due to nesting rules, the nested 'try' // must also be within the outer handler, and obviously cannot share the same 'last' block. // for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++) { EHblkDsc* eh = ehGetDsc(XTnum); // Find the EH region 'eh' is most nested within, either 'try' or handler or none. bool outerIsTryRegion; unsigned ehOuterIndex = eh->ebdGetEnclosingRegionIndex(&outerIsTryRegion); if (ehOuterIndex != EHblkDsc::NO_ENCLOSING_INDEX) { EHblkDsc* ehInner = eh; // This gets updated as we loop outwards in the EH nesting unsigned ehInnerIndex = XTnum; // This gets updated as we loop outwards in the EH nesting bool innerIsTryRegion; EHblkDsc* ehOuter = ehGetDsc(ehOuterIndex); // Debugging: say what type of block we're updating. 
INDEBUG(const char* outerType = ""; const char* innerType = "";) // 'insertAfterBlk' is the place we will insert new "normalization" blocks. We don't know yet if we will // insert them after the innermost 'try' or handler's "last" block, so we set it to nullptr. Once we // determine the innermost region that is equivalent, we set this, and then update it incrementally as we // loop outwards. BasicBlock* insertAfterBlk = nullptr; bool foundMatchingLastBlock = false; // This is set to 'false' for mutual protect regions for which we will not insert a normalization block. bool insertNormalizationBlock = true; // Keep track of what the 'try' index and handler index should be for any new normalization block that we // insert. If we have a sequence of alternating nested 'try' and handlers with the same 'last' block, we'll // need to update these as we go. For example: // try { // EH#5 // ... // catch { // EH#4 // ... // try { // EH#3 // ... // catch { // EH#2 // ... // try { // EH#1 // BB01 // try=1, hnd=2 // } } } } } // all the 'last' blocks are the same // // after normalization: // // try { // EH#5 // ... // catch { // EH#4 // ... // try { // EH#3 // ... // catch { // EH#2 // ... // try { // EH#1 // BB01 // try=1, hnd=2 // } // BB02 // try=3, hnd=2 // } // BB03 // try=3, hnd=4 // } // BB04 // try=5, hnd=4 // } // BB05 // try=5, hnd=0 (no enclosing hnd) // } // unsigned nextTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; // Initialization only needed to quell compiler // warnings. unsigned nextHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; // We compare the outer region against the inner region's 'try' or handler, determined by the // 'outerIsTryRegion' variable. Once we decide that, we know exactly the 'last' pointer that we will use to // compare against all enclosing EH regions. // // For example, if we have these nested EH regions (omitting some corresponding try/catch clauses for each // nesting level): // // try { // ... // catch { // ... // try { // } } } // all the 'last' blocks are the same // // then we determine that the innermost region we are going to compare against is the 'try' region. There's // no reason to compare against its handler region for any enclosing region (since it couldn't possibly // share a 'last' block with the enclosing region). However, there's no harm, either (and it simplifies // the code for the first set of comparisons to be the same as subsequent, more enclosing cases). BasicBlock* lastBlockPtrToCompare = nullptr; // We need to keep track of the last "mutual protect" region so we can properly not add additional blocks // to the second and subsequent mutual protect try blocks. We can't just keep track of the EH region // pointer, because we're updating the last blocks as we go. So, we need to keep track of the // pre-update 'try' begin/last blocks themselves. These only matter if the "last" blocks that match are // from two (or more) nested 'try' regions. BasicBlock* mutualTryBeg = nullptr; BasicBlock* mutualTryLast = nullptr; if (outerIsTryRegion) { nextTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; // unused, since the outer block is a 'try' region. // The outer (enclosing) region is a 'try' if (ehOuter->ebdTryLast == ehInner->ebdTryLast) { // Case (1) try nested in try. foundMatchingLastBlock = true; INDEBUG(innerType = "try"; outerType = "try";) insertAfterBlk = ehOuter->ebdTryLast; lastBlockPtrToCompare = insertAfterBlk; if (EHblkDsc::ebdIsSameTry(ehOuter, ehInner)) { // We can't touch this 'try', since it's mutual protect. 
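                        // Its 'try' extent must stay identical to the inner region's, so no normalization block is
                        // inserted for it.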
CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("Mutual protect regions EH#%u and EH#%u; leaving identical 'try' last blocks.\n", ehOuterIndex, ehInnerIndex); } #endif // DEBUG insertNormalizationBlock = false; } else { nextHndIndex = ehInner->ebdTryLast->hasHndIndex() ? ehInner->ebdTryLast->getHndIndex() : EHblkDsc::NO_ENCLOSING_INDEX; } } else if (ehOuter->ebdTryLast == ehInner->ebdHndLast) { // Case (2) handler nested in try. foundMatchingLastBlock = true; INDEBUG(innerType = "handler"; outerType = "try";) insertAfterBlk = ehOuter->ebdTryLast; lastBlockPtrToCompare = insertAfterBlk; assert(ehInner->ebdHndLast->getHndIndex() == ehInnerIndex); nextHndIndex = ehInner->ebdEnclosingHndIndex; } else { // No "last" pointers match! } if (foundMatchingLastBlock) { // The outer might be part of a new set of mutual protect regions (if it isn't part of one already). mutualTryBeg = ehOuter->ebdTryBeg; mutualTryLast = ehOuter->ebdTryLast; } } else { nextHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; // unused, since the outer block is a handler region. // The outer (enclosing) region is a handler (note that it can't be a filter; there is no nesting // within a filter). if (ehOuter->ebdHndLast == ehInner->ebdTryLast) { // Case (3) try nested in handler. foundMatchingLastBlock = true; INDEBUG(innerType = "try"; outerType = "handler";) insertAfterBlk = ehOuter->ebdHndLast; lastBlockPtrToCompare = insertAfterBlk; assert(ehInner->ebdTryLast->getTryIndex() == ehInnerIndex); nextTryIndex = ehInner->ebdEnclosingTryIndex; } else if (ehOuter->ebdHndLast == ehInner->ebdHndLast) { // Case (4) handler nested in handler. foundMatchingLastBlock = true; INDEBUG(innerType = "handler"; outerType = "handler";) insertAfterBlk = ehOuter->ebdHndLast; lastBlockPtrToCompare = insertAfterBlk; nextTryIndex = ehInner->ebdTryLast->hasTryIndex() ? ehInner->ebdTryLast->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX; } else { // No "last" pointers match! } } while (foundMatchingLastBlock) { assert(lastBlockPtrToCompare != nullptr); assert(insertAfterBlk != nullptr); assert(ehOuterIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(ehOuter != nullptr); // Add a normalization block if (insertNormalizationBlock) { // Add a new last block for 'ehOuter' that will be outside the EH region with which it encloses and // shares a 'last' pointer BasicBlock* newLast = bbNewBasicBlock(BBJ_NONE); assert(insertAfterBlk != nullptr); fgInsertBBafter(insertAfterBlk, newLast); #ifdef DEBUG if (verbose) { printf( "last %s block for EH#%u and last %s block for EH#%u are same block; inserted new " FMT_BB " after " FMT_BB " as new last %s block for EH#%u.\n", outerType, ehOuterIndex, innerType, ehInnerIndex, newLast->bbNum, insertAfterBlk->bbNum, outerType, ehOuterIndex); } #endif // DEBUG if (outerIsTryRegion) { ehOuter->ebdTryLast = newLast; newLast->setTryIndex(ehOuterIndex); if (nextHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) { newLast->clearHndIndex(); } else { newLast->setHndIndex(nextHndIndex); } } else { ehOuter->ebdHndLast = newLast; if (nextTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { newLast->clearTryIndex(); } else { newLast->setTryIndex(nextTryIndex); } newLast->setHndIndex(ehOuterIndex); } newLast->bbCatchTyp = BBCT_NONE; // bbCatchTyp is only set on the first block of a handler, which is this not newLast->bbCodeOffs = insertAfterBlk->bbCodeOffsEnd; newLast->bbCodeOffsEnd = newLast->bbCodeOffs; // code size = 0. TODO: use BAD_IL_OFFSET instead? 
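                    // The new zero-length block inherits its weight from the block it was inserted after and is
                    // marked internal so later phases know it was added by the compiler.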
newLast->inheritWeight(insertAfterBlk); newLast->bbFlags |= BBF_INTERNAL; // The new block (a fall-through block) is a new predecessor. if (fgCheapPredsValid) { fgAddCheapPred(newLast, insertAfterBlk); } // Move the insert pointer. More enclosing equivalent 'last' blocks will be inserted after this. insertAfterBlk = newLast; modified = true; #ifdef DEBUG if (verbose) // Normally this is way too verbose, but it is useful for debugging { printf("*************** fgNormalizeEH() made a change\n"); fgDispBasicBlocks(); fgDispHandlerTab(); } #endif // DEBUG } // Now find the next outer enclosing EH region and see if it also shares the last block. foundMatchingLastBlock = false; // assume nothing will match ehInner = ehOuter; ehInnerIndex = ehOuterIndex; innerIsTryRegion = outerIsTryRegion; ehOuterIndex = ehOuter->ebdGetEnclosingRegionIndex(&outerIsTryRegion); // Loop outwards in the EH nesting. if (ehOuterIndex != EHblkDsc::NO_ENCLOSING_INDEX) { // There are more enclosing regions; check for equivalent 'last' pointers. INDEBUG(innerType = outerType; outerType = "";) ehOuter = ehGetDsc(ehOuterIndex); insertNormalizationBlock = true; // assume it's not mutual protect if (outerIsTryRegion) { nextTryIndex = EHblkDsc::NO_ENCLOSING_INDEX; // unused, since the outer block is a 'try' region. // The outer (enclosing) region is a 'try' if (ehOuter->ebdTryLast == lastBlockPtrToCompare) { // Case (1) and (2): try or handler nested in try. foundMatchingLastBlock = true; INDEBUG(outerType = "try";) if (innerIsTryRegion && ehOuter->ebdIsSameTry(mutualTryBeg, mutualTryLast)) { // We can't touch this 'try', since it's mutual protect. CLANG_FORMAT_COMMENT_ANCHOR; #ifdef DEBUG if (verbose) { printf("Mutual protect regions EH#%u and EH#%u; leaving identical 'try' last " "blocks.\n", ehOuterIndex, ehInnerIndex); } #endif // DEBUG insertNormalizationBlock = false; // We still need to update the 'last' pointer, in case someone inserted a normalization // block before the start of the mutual protect 'try' region. ehOuter->ebdTryLast = insertAfterBlk; } else { if (innerIsTryRegion) { // Case (1) try nested in try. nextHndIndex = ehInner->ebdTryLast->hasHndIndex() ? ehInner->ebdTryLast->getHndIndex() : EHblkDsc::NO_ENCLOSING_INDEX; } else { // Case (2) handler nested in try. assert(ehInner->ebdHndLast->getHndIndex() == ehInnerIndex); nextHndIndex = ehInner->ebdEnclosingHndIndex; } } // The outer might be part of a new set of mutual protect regions (if it isn't part of one // already). mutualTryBeg = ehOuter->ebdTryBeg; mutualTryLast = ehOuter->ebdTryLast; } } else { nextHndIndex = EHblkDsc::NO_ENCLOSING_INDEX; // unused, since the outer block is a handler region. // The outer (enclosing) region is a handler (note that it can't be a filter; there is no // nesting within a filter). if (ehOuter->ebdHndLast == lastBlockPtrToCompare) { // Case (3) and (4): try nested in try or handler. foundMatchingLastBlock = true; INDEBUG(outerType = "handler";) if (innerIsTryRegion) { // Case (3) try nested in handler. assert(ehInner->ebdTryLast->getTryIndex() == ehInnerIndex); nextTryIndex = ehInner->ebdEnclosingTryIndex; } else { // Case (4) handler nested in handler. nextTryIndex = ehInner->ebdTryLast->hasTryIndex() ? ehInner->ebdTryLast->getTryIndex() : EHblkDsc::NO_ENCLOSING_INDEX; } } } } // If we get to here and foundMatchingLastBlock is false, then the inner and outer region don't share // any 'last' blocks, so we're done. 
Note that we could have a situation like this: // // try4 try3 try2 try1 // |---- | | | BB01 // | |---- | | BB02 // | | |---- | BB03 // | | | |----- BB04 // | | |----- |----- BB05 // |---- |------------------- BB06 // // (Thus, try1 & try2 end at BB05, and are nested inside try3 & try4, which both end at BB06.) // In this case, we'll process try1 and try2, then break out. Later, as we iterate through the EH table, // we'll get to try3 and process it and try4. } // end while (foundMatchingLastBlock) } // if (ehOuterIndex != EHblkDsc::NO_ENCLOSING_INDEX) } // EH table iteration return modified; } /*****************************************************************************/ #ifdef DEBUG void Compiler::dispIncomingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause) { printf("EH clause #%u:\n", num); printf(" Flags: 0x%x", clause.Flags); // Note: the flags field is kind of weird. It should be compared for equality // to determine the type of clause, even though it looks like a bitfield. In // Particular, CORINFO_EH_CLAUSE_NONE is zero, so you can't use "&" to check it. const DWORD CORINFO_EH_CLAUSE_TYPE_MASK = 0x7; switch (clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK) { case CORINFO_EH_CLAUSE_NONE: printf(" (catch)"); break; case CORINFO_EH_CLAUSE_FILTER: printf(" (filter)"); break; case CORINFO_EH_CLAUSE_FINALLY: printf(" (finally)"); break; case CORINFO_EH_CLAUSE_FAULT: printf(" (fault)"); break; default: printf(" (UNKNOWN type %u!)", clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK); break; } if (clause.Flags & ~CORINFO_EH_CLAUSE_TYPE_MASK) { printf(" (extra unknown bits: 0x%x)", clause.Flags & ~CORINFO_EH_CLAUSE_TYPE_MASK); } printf("\n"); printf(" TryOffset: 0x%x\n", clause.TryOffset); printf(" TryLength: 0x%x\n", clause.TryLength); printf(" HandlerOffset: 0x%x\n", clause.HandlerOffset); printf(" HandlerLength: 0x%x\n", clause.HandlerLength); if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { printf(" FilterOffset: 0x%x\n", clause.FilterOffset); } else { printf(" ClassToken: 0x%x\n", clause.ClassToken); } } void Compiler::dispOutgoingEHClause(unsigned num, const CORINFO_EH_CLAUSE& clause) { if (opts.dspDiffable) { /* (( brace matching editor workaround to compensate for the following line */ printf("EH#%u: try [%s..%s) handled by [%s..%s) ", num, GetEmitter()->emitOffsetToLabel(clause.TryOffset), GetEmitter()->emitOffsetToLabel(clause.TryLength), GetEmitter()->emitOffsetToLabel(clause.HandlerOffset), GetEmitter()->emitOffsetToLabel(clause.HandlerLength)); } else { /* (( brace matching editor workaround to compensate for the following line */ printf("EH#%u: try [%04X..%04X) handled by [%04X..%04X) ", num, dspOffset(clause.TryOffset), dspOffset(clause.TryLength), dspOffset(clause.HandlerOffset), dspOffset(clause.HandlerLength)); } // Note: the flags field is kind of weird. It should be compared for equality // to determine the type of clause, even though it looks like a bitfield. In // Particular, CORINFO_EH_CLAUSE_NONE is zero, so you can "&" to check it. // You do need to mask off the bits, though, because CORINFO_EH_CLAUSE_DUPLICATE // is and'ed in. 
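    // For example, a cloned finally has both CORINFO_EH_CLAUSE_FINALLY and CORINFO_EH_CLAUSE_DUPLICATE set;
    // masking with CORINFO_EH_CLAUSE_TYPE_MASK recovers just the finally clause type for the switch below.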
const DWORD CORINFO_EH_CLAUSE_TYPE_MASK = 0x7; switch (clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK) { case CORINFO_EH_CLAUSE_NONE: printf("(class: %04X)", clause.ClassToken); break; case CORINFO_EH_CLAUSE_FILTER: if (opts.dspDiffable) { /* ( brace matching editor workaround to compensate for the following line */ printf("filter at [%s..%s)", GetEmitter()->emitOffsetToLabel(clause.ClassToken), GetEmitter()->emitOffsetToLabel(clause.HandlerOffset)); } else { /* ( brace matching editor workaround to compensate for the following line */ printf("filter at [%04X..%04X)", dspOffset(clause.ClassToken), dspOffset(clause.HandlerOffset)); } break; case CORINFO_EH_CLAUSE_FINALLY: printf("(finally)"); break; case CORINFO_EH_CLAUSE_FAULT: printf("(fault)"); break; default: printf("(UNKNOWN type %u!)", clause.Flags & CORINFO_EH_CLAUSE_TYPE_MASK); assert(!"unknown type"); break; } if ((clause.TryOffset == clause.TryLength) && (clause.TryOffset == clause.HandlerOffset) && ((clause.Flags & (CORINFO_EH_CLAUSE_DUPLICATE | CORINFO_EH_CLAUSE_FINALLY)) == (CORINFO_EH_CLAUSE_DUPLICATE | CORINFO_EH_CLAUSE_FINALLY))) { printf(" cloned finally"); } else if (clause.Flags & CORINFO_EH_CLAUSE_DUPLICATE) { printf(" duplicated"); } else if (clause.Flags & CORINFO_EH_CLAUSE_SAMETRY) { printf(" same try"); } printf("\n"); } /*****************************************************************************/ void Compiler::fgVerifyHandlerTab() { if (compIsForInlining()) { // We don't inline functions with EH. Don't bother verifying the EH table in the inlinee Compiler. return; } if (compHndBBtabCount == 0) { return; } // Did we do the normalization that prevents the first block of a handler from being a 'try' block (case 1)? bool handlerBegIsTryBegNormalizationDone = fgNormalizeEHDone; // Did we do the normalization that prevents multiple EH regions (namely, 'try' blocks) from starting on the same // block (case 2)? bool multipleBegBlockNormalizationDone = fgNormalizeEHDone; // Did we do the normalization that prevents multiple EH regions ('try' or handler blocks) from ending on the same // block (case 3)? bool multipleLastBlockNormalizationDone = false; // Currently disabled assert(compHndBBtabCount <= compHndBBtabAllocCount); unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { assert(HBtab->ebdTryBeg != nullptr); assert(HBtab->ebdTryLast != nullptr); assert(HBtab->ebdHndBeg != nullptr); assert(HBtab->ebdHndLast != nullptr); assert(HBtab->ebdTryBeg->bbFlags & BBF_TRY_BEG); assert(HBtab->ebdTryBeg->bbFlags & BBF_DONT_REMOVE); assert(HBtab->ebdHndBeg->bbFlags & BBF_DONT_REMOVE); assert((HBtab->ebdTryBeg->bbFlags & BBF_REMOVED) == 0); assert((HBtab->ebdTryLast->bbFlags & BBF_REMOVED) == 0); assert((HBtab->ebdHndBeg->bbFlags & BBF_REMOVED) == 0); assert((HBtab->ebdHndLast->bbFlags & BBF_REMOVED) == 0); if (HBtab->HasFilter()) { assert(HBtab->ebdFilter != nullptr); assert(HBtab->ebdFilter->bbFlags & BBF_DONT_REMOVE); assert((HBtab->ebdFilter->bbFlags & BBF_REMOVED) == 0); } #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { assert(HBtab->ebdHndBeg->bbFlags & BBF_FUNCLET_BEG); if (HBtab->HasFilter()) { assert(HBtab->ebdFilter->bbFlags & BBF_FUNCLET_BEG); } } #endif // FEATURE_EH_FUNCLETS } // I want to assert things about the relative ordering of blocks in the block list using // block number, but I don't want to renumber the basic blocks, which might cause a difference // between debug and non-debug code paths. 
So, create a renumbered block mapping: map the // existing block number to a renumbered block number that is ordered by block list order. unsigned bbNumMax = impInlineRoot()->fgBBNumMax; // blockNumMap[old block number] => new block number size_t blockNumBytes = (bbNumMax + 1) * sizeof(unsigned); unsigned* blockNumMap = (unsigned*)_alloca(blockNumBytes); memset(blockNumMap, 0, blockNumBytes); unsigned newBBnum = 1; for (BasicBlock* const block : Blocks()) { assert((block->bbFlags & BBF_REMOVED) == 0); assert(1 <= block->bbNum && block->bbNum <= bbNumMax); assert(blockNumMap[block->bbNum] == 0); // If this fails, we have two blocks with the same block number. blockNumMap[block->bbNum] = newBBnum++; } // Note that there may be some blockNumMap[x] == 0, for a block number 'x' that has been deleted, if the blocks // haven't been renumbered since the deletion. #if 0 // Useful for debugging, but don't want to put this in the dump all the time if (verbose) { printf("fgVerifyHandlerTab block number map: BB current => BB new\n"); for (unsigned i = 0; i <= bbNumMax; i++) { if (blockNumMap[i] != 0) { printf(FMT_BB " => " FMT_BB "\n", i, blockNumMap[i]); } } } #endif // To verify that bbCatchTyp is set properly on all blocks, and that some BBF_* flags are only set on the first // block of 'try' or handlers, create two bool arrays indexed by block number: one for the set of blocks that // are the beginning blocks of 'try' regions, and one for blocks that are the beginning of handlers (including // filters). Note that since this checking function runs before EH normalization, we have to handle the case // where blocks can be both the beginning of a 'try' as well as the beginning of a handler. After we've iterated // over the EH table, loop over all blocks and verify that only handler begin blocks have bbCatchTyp == BBCT_NONE, // and some other things. size_t blockBoolSetBytes = (bbNumMax + 1) * sizeof(bool); bool* blockTryBegSet = (bool*)_alloca(blockBoolSetBytes); bool* blockHndBegSet = (bool*)_alloca(blockBoolSetBytes); for (unsigned i = 0; i <= bbNumMax; i++) { blockTryBegSet[i] = false; blockHndBegSet[i] = false; } #if defined(FEATURE_EH_FUNCLETS) bool isLegalFirstFunclet = false; unsigned bbNumFirstFunclet = 0; if (fgFuncletsCreated) { // Assert some things about the "first funclet block" pointer. assert(fgFirstFuncletBB != nullptr); assert((fgFirstFuncletBB->bbFlags & BBF_REMOVED) == 0); bbNumFirstFunclet = blockNumMap[fgFirstFuncletBB->bbNum]; assert(bbNumFirstFunclet != 0); } else { assert(fgFirstFuncletBB == nullptr); } #endif // FEATURE_EH_FUNCLETS for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { unsigned bbNumTryBeg = blockNumMap[HBtab->ebdTryBeg->bbNum]; unsigned bbNumTryLast = blockNumMap[HBtab->ebdTryLast->bbNum]; unsigned bbNumHndBeg = blockNumMap[HBtab->ebdHndBeg->bbNum]; unsigned bbNumHndLast = blockNumMap[HBtab->ebdHndLast->bbNum]; unsigned bbNumFilter = 0; // This should never get used except under "if (HBtab->HasFilter())" if (HBtab->HasFilter()) { bbNumFilter = blockNumMap[HBtab->ebdFilter->bbNum]; } // Assert that the EH blocks are in the main block list assert(bbNumTryBeg != 0); assert(bbNumTryLast != 0); assert(bbNumHndBeg != 0); assert(bbNumHndLast != 0); if (HBtab->HasFilter()) { assert(bbNumFilter != 0); } // Check relative ordering of the 'beg' and 'last' blocks. Note that in IL (and in our initial block list) // there is no required ordering between the 'try' and handler regions: the handler might come first! 
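        // (The checks below branch on fgFuncletsCreated, since this verification runs both before and after
        // funclet creation.)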
// After funclets have been created, all the handler blocks come in sequence at the end of the // function (this is checked below, with checks for the first funclet block). Note that a handler // might contain a nested 'try', which will also then be in the "funclet region". // Also, the 'try' and handler regions do not need to be adjacent. assert(bbNumTryBeg <= bbNumTryLast); assert(bbNumHndBeg <= bbNumHndLast); if (HBtab->HasFilter()) { // Since the filter block must be different from the handler, this condition is "<", not "<=". assert(bbNumFilter < bbNumHndBeg); } // The EH regions are disjoint: the handler (including the filter, if applicable) is strictly before or after // the 'try'. if (HBtab->HasFilter()) { assert((bbNumHndLast < bbNumTryBeg) || (bbNumTryLast < bbNumFilter)); } else { assert((bbNumHndLast < bbNumTryBeg) || (bbNumTryLast < bbNumHndBeg)); } #if defined(FEATURE_EH_FUNCLETS) // If funclets have been created, check the first funclet block. The first funclet block must be the // first block of a filter or handler. All filter/handler blocks must come after it. // Note that 'try' blocks might come either before or after it. If after, they will be nested within // a handler. If before, they might be nested within a try, but not within a handler. if (fgFuncletsCreated) { if (bbNumTryLast < bbNumFirstFunclet) { // This EH region can't be nested in a handler, or else it would be in the funclet region. assert(HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); } else { // The last block of the 'try' is in the funclet region; make sure the whole thing is. if (multipleBegBlockNormalizationDone) { assert(bbNumTryBeg > bbNumFirstFunclet); // ">" because a 'try' can't be the first block of a // handler (by EH normalization). } else { assert(bbNumTryBeg >= bbNumFirstFunclet); } // This EH region must be nested in a handler. assert(HBtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX); } if (HBtab->HasFilter()) { assert(bbNumFirstFunclet <= bbNumFilter); if (fgFirstFuncletBB == HBtab->ebdFilter) { assert(!isLegalFirstFunclet); // We can't have already found a matching block for the first funclet. isLegalFirstFunclet = true; } } else { assert(bbNumFirstFunclet <= bbNumHndBeg); if (fgFirstFuncletBB == HBtab->ebdHndBeg) { assert(!isLegalFirstFunclet); // We can't have already found a matching block for the first funclet. isLegalFirstFunclet = true; } } } #endif // FEATURE_EH_FUNCLETS // Check the 'try' region nesting, using ebdEnclosingTryIndex. // Only check one level of nesting, since we'll check the outer EH region (and its nesting) when we get to it // later. if (HBtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) { assert(HBtab->ebdEnclosingTryIndex > XTnum); // The enclosing region must come after this one in the table EHblkDsc* HBtabOuter = ehGetDsc(HBtab->ebdEnclosingTryIndex); unsigned bbNumOuterTryBeg = blockNumMap[HBtabOuter->ebdTryBeg->bbNum]; unsigned bbNumOuterTryLast = blockNumMap[HBtabOuter->ebdTryLast->bbNum]; // A few basic asserts (that will also get covered later, when this outer region gets handled). assert(bbNumOuterTryBeg != 0); assert(bbNumOuterTryLast != 0); assert(bbNumOuterTryBeg <= bbNumOuterTryLast); if (!EHblkDsc::ebdIsSameTry(HBtab, HBtabOuter)) { // If it's not a mutually protect region, then the outer 'try' must completely lexically contain all the // blocks in the nested EH region. 
However, if funclets have been created, this is no longer true, since // this 'try' might be in a handler that is pulled out to the funclet region, while the outer 'try' // remains in the main function region. CLANG_FORMAT_COMMENT_ANCHOR; #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // If both the 'try' region and the outer 'try' region are in the main function area, then we can // do the normal nesting check. Otherwise, it's harder to find a useful assert to make about their // relationship. if ((bbNumTryLast < bbNumFirstFunclet) && (bbNumOuterTryLast < bbNumFirstFunclet)) { if (multipleBegBlockNormalizationDone) { assert(bbNumOuterTryBeg < bbNumTryBeg); // Two 'try' regions can't start at the same // block (by EH normalization). } else { assert(bbNumOuterTryBeg <= bbNumTryBeg); } if (multipleLastBlockNormalizationDone) { assert(bbNumTryLast < bbNumOuterTryLast); // Two 'try' regions can't end at the same block //(by EH normalization). } else { assert(bbNumTryLast <= bbNumOuterTryLast); } } // With funclets, all we can say about the handler blocks is that they are disjoint from the // enclosing try. assert((bbNumHndLast < bbNumOuterTryBeg) || (bbNumOuterTryLast < bbNumHndBeg)); } else #endif // FEATURE_EH_FUNCLETS { if (multipleBegBlockNormalizationDone) { assert(bbNumOuterTryBeg < bbNumTryBeg); // Two 'try' regions can't start at the same block // (by EH normalization). } else { assert(bbNumOuterTryBeg <= bbNumTryBeg); } assert(bbNumOuterTryBeg < bbNumHndBeg); // An inner handler can never start at the same // block as an outer 'try' (by IL rules). if (multipleLastBlockNormalizationDone) { // An inner EH region can't share a 'last' block with the outer 'try' (by EH normalization). assert(bbNumTryLast < bbNumOuterTryLast); assert(bbNumHndLast < bbNumOuterTryLast); } else { assert(bbNumTryLast <= bbNumOuterTryLast); assert(bbNumHndLast <= bbNumOuterTryLast); } } } } // Check the handler region nesting, using ebdEnclosingHndIndex. // Only check one level of nesting, since we'll check the outer EH region (and its nesting) when we get to it // later. if (HBtab->ebdEnclosingHndIndex != EHblkDsc::NO_ENCLOSING_INDEX) { assert(HBtab->ebdEnclosingHndIndex > XTnum); // The enclosing region must come after this one in the table EHblkDsc* HBtabOuter = ehGetDsc(HBtab->ebdEnclosingHndIndex); unsigned bbNumOuterHndBeg = blockNumMap[HBtabOuter->ebdHndBeg->bbNum]; unsigned bbNumOuterHndLast = blockNumMap[HBtabOuter->ebdHndLast->bbNum]; // A few basic asserts (that will also get covered later, when this outer regions gets handled). assert(bbNumOuterHndBeg != 0); assert(bbNumOuterHndLast != 0); assert(bbNumOuterHndBeg <= bbNumOuterHndLast); // The outer handler must completely contain all the blocks in the EH region nested within it. However, if // funclets have been created, it's harder to make any relationship asserts about the order of nested // handlers, which also have been made into funclets. #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { if (handlerBegIsTryBegNormalizationDone) { assert(bbNumOuterHndBeg < bbNumTryBeg); // An inner 'try' can't start at the same block as an // outer handler (by EH normalization). } else { assert(bbNumOuterHndBeg <= bbNumTryBeg); } if (multipleLastBlockNormalizationDone) { assert(bbNumTryLast < bbNumOuterHndLast); // An inner 'try' can't end at the same block as an // outer handler (by EH normalization). 
} else { assert(bbNumTryLast <= bbNumOuterHndLast); } // With funclets, all we can say about the handler blocks is that they are disjoint from the enclosing // handler. assert((bbNumHndLast < bbNumOuterHndBeg) || (bbNumOuterHndLast < bbNumHndBeg)); } else #endif // FEATURE_EH_FUNCLETS { if (handlerBegIsTryBegNormalizationDone) { assert(bbNumOuterHndBeg < bbNumTryBeg); // An inner 'try' can't start at the same block as an // outer handler (by EH normalization). } else { assert(bbNumOuterHndBeg <= bbNumTryBeg); } assert(bbNumOuterHndBeg < bbNumHndBeg); // An inner handler can never start at the same block // as an outer handler (by IL rules). if (multipleLastBlockNormalizationDone) { // An inner EH region can't share a 'last' block with the outer handler (by EH normalization). assert(bbNumTryLast < bbNumOuterHndLast); assert(bbNumHndLast < bbNumOuterHndLast); } else { assert(bbNumTryLast <= bbNumOuterHndLast); assert(bbNumHndLast <= bbNumOuterHndLast); } } } // Set up blockTryBegSet and blockHndBegSet. // We might want to have this assert: // if (fgNormalizeEHDone) assert(!blockTryBegSet[HBtab->ebdTryBeg->bbNum]); // But we can't, because if we have mutually-protect 'try' regions, we'll see exactly the same tryBeg twice // (or more). blockTryBegSet[HBtab->ebdTryBeg->bbNum] = true; assert(!blockHndBegSet[HBtab->ebdHndBeg->bbNum]); blockHndBegSet[HBtab->ebdHndBeg->bbNum] = true; if (HBtab->HasFilter()) { assert(HBtab->ebdFilter->bbCatchTyp == BBCT_FILTER); assert(!blockHndBegSet[HBtab->ebdFilter->bbNum]); blockHndBegSet[HBtab->ebdFilter->bbNum] = true; } // Check the block bbCatchTyp for this EH region's filter and handler. if (HBtab->HasFilter()) { assert(HBtab->ebdHndBeg->bbCatchTyp == BBCT_FILTER_HANDLER); } else if (HBtab->HasCatchHandler()) { assert((HBtab->ebdHndBeg->bbCatchTyp != BBCT_NONE) && (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FAULT) && (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FINALLY) && (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FILTER) && (HBtab->ebdHndBeg->bbCatchTyp != BBCT_FILTER_HANDLER)); } else if (HBtab->HasFaultHandler()) { assert(HBtab->ebdHndBeg->bbCatchTyp == BBCT_FAULT); } else if (HBtab->HasFinallyHandler()) { assert(HBtab->ebdHndBeg->bbCatchTyp == BBCT_FINALLY); } } #if defined(FEATURE_EH_FUNCLETS) assert(!fgFuncletsCreated || isLegalFirstFunclet); #endif // FEATURE_EH_FUNCLETS // Figure out what 'try' and handler index each basic block should have, // and check the blocks against that. This depends on the more nested EH // clauses appearing first. For duplicate clauses, we use the duplicate // clause 'try' region to set the try index, since a handler that has // been pulled out of an enclosing 'try' wouldn't have had its try index // otherwise set. The duplicate clause handler is truly a duplicate of // a previously processed handler, so we ignore it. BasicBlock* block; size_t blockIndexBytes = (bbNumMax + 1) * sizeof(unsigned short); unsigned short* blockTryIndex = (unsigned short*)_alloca(blockIndexBytes); unsigned short* blockHndIndex = (unsigned short*)_alloca(blockIndexBytes); memset(blockTryIndex, 0, blockIndexBytes); memset(blockHndIndex, 0, blockIndexBytes); for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { BasicBlock* blockEnd; for (block = HBtab->ebdTryBeg, blockEnd = HBtab->ebdTryLast->bbNext; block != blockEnd; block = block->bbNext) { if (blockTryIndex[block->bbNum] == 0) { blockTryIndex[block->bbNum] = (unsigned short)(XTnum + 1); } } for (block = (HBtab->HasFilter() ? 
HBtab->ebdFilter : HBtab->ebdHndBeg), blockEnd = HBtab->ebdHndLast->bbNext; block != blockEnd; block = block->bbNext) { if (blockHndIndex[block->bbNum] == 0) { blockHndIndex[block->bbNum] = (unsigned short)(XTnum + 1); } } } #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // Mark all the funclet 'try' indices correctly, since they do not exist in the linear 'try' region that // we looped over above. This is similar to duplicate clause logic, but we only need to look at the most // nested enclosing try index, not the entire set of enclosing try indices, since that is what we store // on the block. for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { unsigned enclosingTryIndex = ehTrueEnclosingTryIndexIL(XTnum); // find the true enclosing try index, // ignoring 'mutual protect' trys if (enclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) { // The handler funclet for 'XTnum' has a try index of 'enclosingTryIndex' (at least, the parts of the // funclet that don't already have a more nested 'try' index because a 'try' is nested within the // handler). BasicBlock* blockEnd; for (block = (HBtab->HasFilter() ? HBtab->ebdFilter : HBtab->ebdHndBeg), blockEnd = HBtab->ebdHndLast->bbNext; block != blockEnd; block = block->bbNext) { if (blockTryIndex[block->bbNum] == 0) { blockTryIndex[block->bbNum] = (unsigned short)(enclosingTryIndex + 1); } } } } } #endif // FEATURE_EH_FUNCLETS // Make sure that all blocks have the right index, including those blocks that should have zero (no EH region). for (BasicBlock* const block : Blocks()) { assert(block->bbTryIndex == blockTryIndex[block->bbNum]); assert(block->bbHndIndex == blockHndIndex[block->bbNum]); // Also, since we're walking the blocks, check that all blocks we didn't mark as EH handler 'begin' blocks // already have bbCatchTyp set properly. if (!blockHndBegSet[block->bbNum]) { assert(block->bbCatchTyp == BBCT_NONE); #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { // Make sure blocks that aren't the first block of a funclet do not have the BBF_FUNCLET_BEG flag set. assert((block->bbFlags & BBF_FUNCLET_BEG) == 0); } #endif // FEATURE_EH_FUNCLETS } // Only the first block of 'try' regions should have BBF_TRY_BEG set. if (!blockTryBegSet[block->bbNum]) { assert((block->bbFlags & BBF_TRY_BEG) == 0); } } } void Compiler::fgDispHandlerTab() { printf("\n*************** Exception Handling table"); if (compHndBBtabCount == 0) { printf(" is empty\n"); return; } printf("\nindex "); #if !defined(FEATURE_EH_FUNCLETS) printf("nest, "); #endif // !FEATURE_EH_FUNCLETS printf("eTry, eHnd\n"); unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { HBtab->DispEntry(XTnum); } } #endif // DEBUG /*****************************************************************************/ /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX "Compiler" functions: EH tree verification XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /***************************************************************************** * The following code checks the following rules for the EH table: * 1. Overlapping of try blocks not allowed. * 2. Handler blocks cannot be shared between different try blocks. * 3. 
Try blocks with Finally or Fault blocks cannot have other handlers. * 4. If block A contains block B, A should also contain B's try/filter/handler. * 5. A block cannot contain it's related try/filter/handler. * 6. Nested block must appear before containing block * */ void Compiler::verInitEHTree(unsigned numEHClauses) { ehnNext = new (this, CMK_BasicBlock) EHNodeDsc[numEHClauses * 3]; ehnTree = nullptr; } /* Inserts the try, handler and filter (optional) clause information in a tree structure * in order to catch incorrect eh formatting (e.g. illegal overlaps, incorrect order) */ void Compiler::verInsertEhNode(CORINFO_EH_CLAUSE* clause, EHblkDsc* handlerTab) { EHNodeDsc* tryNode = ehnNext++; EHNodeDsc* handlerNode = ehnNext++; EHNodeDsc* filterNode = nullptr; // optional tryNode->ehnSetTryNodeType(); tryNode->ehnStartOffset = clause->TryOffset; tryNode->ehnEndOffset = clause->TryOffset + clause->TryLength - 1; tryNode->ehnHandlerNode = handlerNode; if (clause->Flags & CORINFO_EH_CLAUSE_FINALLY) { handlerNode->ehnSetFinallyNodeType(); } else if (clause->Flags & CORINFO_EH_CLAUSE_FAULT) { handlerNode->ehnSetFaultNodeType(); } else { handlerNode->ehnSetHandlerNodeType(); } handlerNode->ehnStartOffset = clause->HandlerOffset; handlerNode->ehnEndOffset = clause->HandlerOffset + clause->HandlerLength - 1; handlerNode->ehnTryNode = tryNode; if (clause->Flags & CORINFO_EH_CLAUSE_FILTER) { filterNode = ehnNext++; filterNode->ehnStartOffset = clause->FilterOffset; BasicBlock* blk = handlerTab->BBFilterLast(); filterNode->ehnEndOffset = blk->bbCodeOffsEnd - 1; noway_assert(filterNode->ehnEndOffset != 0); filterNode->ehnSetFilterNodeType(); filterNode->ehnTryNode = tryNode; tryNode->ehnFilterNode = filterNode; } verInsertEhNodeInTree(&ehnTree, tryNode); verInsertEhNodeInTree(&ehnTree, handlerNode); if (filterNode) { verInsertEhNodeInTree(&ehnTree, filterNode); } } /* The root node could be changed by this method. node is inserted to (a) right of root (root.right <-- node) (b) left of root (node.right <-- root; node becomes root) (c) child of root (root.child <-- node) (d) parent of root (node.child <-- root; node becomes root) (e) equivalent of root (root.equivalent <-- node) such that siblings are ordered from left to right child parent relationship and equivalence relationship are not violated Here is a list of all possible cases Case 1 2 3 4 5 6 7 8 9 10 11 12 13 | | | | | | | | | | .......|.|.|.|..................... [ root start ] ..... | | | | | | | | | | | | | | r| | | | | | | | o| | | | | | o| | | | | | t| | | | | | | | | | | | | | | | | | | | | |..........|.|.|.|.....|........|.. [ root end ] ........ 
| | | | | | | | | | | | | | |<-- - - - n o d e - - - -->| Case Operation -------------- 1 (b) 2 Error 3 Error 4 (d) 5 (d) 6 (d) 7 Error 8 Error 9 (a) 10 (c) 11 (c) 12 (c) 13 (e) */ void Compiler::verInsertEhNodeInTree(EHNodeDsc** ppRoot, EHNodeDsc* node) { unsigned nStart = node->ehnStartOffset; unsigned nEnd = node->ehnEndOffset; if (nStart > nEnd) { BADCODE("start offset greater or equal to end offset"); } node->ehnNext = nullptr; node->ehnChild = nullptr; node->ehnEquivalent = nullptr; while (true) { if (*ppRoot == nullptr) { *ppRoot = node; break; } unsigned rStart = (*ppRoot)->ehnStartOffset; unsigned rEnd = (*ppRoot)->ehnEndOffset; if (nStart < rStart) { // Case 1 if (nEnd < rStart) { // Left sibling node->ehnNext = *ppRoot; *ppRoot = node; return; } // Case 2, 3 if (nEnd < rEnd) { //[Error] BADCODE("Overlapping try regions"); } // Case 4, 5 //[Parent] verInsertEhNodeParent(ppRoot, node); return; } // Cases 6 - 13 (nStart >= rStart) if (nEnd > rEnd) { // Case 6, 7, 8, 9 // Case 9 if (nStart > rEnd) { //[RightSibling] // Recurse with Root.Sibling as the new root ppRoot = &((*ppRoot)->ehnNext); continue; } // Case 6 if (nStart == rStart) { //[Parent] if (node->ehnIsTryBlock() || (*ppRoot)->ehnIsTryBlock()) { verInsertEhNodeParent(ppRoot, node); return; } // non try blocks are not allowed to start at the same offset BADCODE("Handlers start at the same offset"); } // Case 7, 8 BADCODE("Overlapping try regions"); } // Case 10-13 (nStart >= rStart && nEnd <= rEnd) if ((nStart != rStart) || (nEnd != rEnd)) { // Cases 10,11,12 //[Child] if ((*ppRoot)->ehnIsTryBlock()) { BADCODE("Inner try appears after outer try in exception handling table"); } else { // We have an EH clause nested within a handler, but the parent // handler clause came first in the table. The rest of the compiler // doesn't expect this, so sort the EH table. fgNeedToSortEHTable = true; // Case 12 (nStart == rStart) // non try blocks are not allowed to start at the same offset if ((nStart == rStart) && !node->ehnIsTryBlock()) { BADCODE("Handlers start at the same offset"); } // check this! ppRoot = &((*ppRoot)->ehnChild); continue; } } // Case 13 //[Equivalent] if (!node->ehnIsTryBlock() && !(*ppRoot)->ehnIsTryBlock()) { BADCODE("Handlers cannot be shared"); } if (!node->ehnIsTryBlock() || !(*ppRoot)->ehnIsTryBlock()) { // Equivalent is only allowed for try bodies // If one is a handler, this means the nesting is wrong BADCODE("Handler and try with the same offset"); } node->ehnEquivalent = node->ehnNext = *ppRoot; // check that the corresponding handler is either a catch handler // or a filter if (node->ehnHandlerNode->ehnIsFaultBlock() || node->ehnHandlerNode->ehnIsFinallyBlock() || (*ppRoot)->ehnHandlerNode->ehnIsFaultBlock() || (*ppRoot)->ehnHandlerNode->ehnIsFinallyBlock()) { BADCODE("Try block with multiple non-filter/non-handler blocks"); } break; } } /********************************************************************** * Make node the parent of *ppRoot. 
All siblings of *ppRoot that are * fully or partially nested in node remain siblings of *ppRoot */ void Compiler::verInsertEhNodeParent(EHNodeDsc** ppRoot, EHNodeDsc* node) { noway_assert(node->ehnNext == nullptr); noway_assert(node->ehnChild == nullptr); // Root is nested in Node noway_assert(node->ehnStartOffset <= (*ppRoot)->ehnStartOffset); noway_assert(node->ehnEndOffset >= (*ppRoot)->ehnEndOffset); // Root is not the same as Node noway_assert(node->ehnStartOffset != (*ppRoot)->ehnStartOffset || node->ehnEndOffset != (*ppRoot)->ehnEndOffset); if (node->ehnIsFilterBlock()) { BADCODE("Protected block appearing within filter block"); } EHNodeDsc* lastChild = nullptr; EHNodeDsc* sibling = (*ppRoot)->ehnNext; while (sibling) { // siblings are ordered left to right, largest right. // nodes have a width of at least one. // Hence sibling start will always be after Node start. noway_assert(sibling->ehnStartOffset > node->ehnStartOffset); // (1) // disjoint if (sibling->ehnStartOffset > node->ehnEndOffset) { break; } // partial containment. if (sibling->ehnEndOffset > node->ehnEndOffset) // (2) { BADCODE("Overlapping try regions"); } // else full containment (follows from (1) and (2)) lastChild = sibling; sibling = sibling->ehnNext; } // All siblings of Root up to and including lastChild will continue to be // siblings of Root (and children of Node). The node to the right of // lastChild will become the first sibling of Node. // if (lastChild) { // Node has more than one child including Root node->ehnNext = lastChild->ehnNext; lastChild->ehnNext = nullptr; } else { // Root is the only child of Node node->ehnNext = (*ppRoot)->ehnNext; (*ppRoot)->ehnNext = nullptr; } node->ehnChild = *ppRoot; *ppRoot = node; } /***************************************************************************** * Checks the following two conditions: * 1) If block A contains block B, A should also contain B's try/filter/handler. * 2) A block cannot contain its related try/filter/handler. * Both these conditions are checked by making sure that all the blocks for an * exception clause are at the same level. * The algorithm is: for each exception clause, determine the first block and * search through the next links for its corresponding try/handler/filter as the * case may be. If not found, then fail. */ void Compiler::verCheckNestingLevel(EHNodeDsc* root) { EHNodeDsc* ehnNode = root; #define exchange(a, b) \ { \ temp = a; \ a = b; \ b = temp; \ } for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++) { EHNodeDsc *p1, *p2, *p3, *temp, *search; p1 = ehnNode++; p2 = ehnNode++; // we are relying on the fact that ehn nodes are allocated sequentially. 
noway_assert(p1->ehnHandlerNode == p2); noway_assert(p2->ehnTryNode == p1); // arrange p1 and p2 in sequential order if (p1->ehnStartOffset == p2->ehnStartOffset) { BADCODE("shared exception handler"); } if (p1->ehnStartOffset > p2->ehnStartOffset) exchange(p1, p2); temp = p1->ehnNext; unsigned numSiblings = 0; search = p2; if (search->ehnEquivalent) { search = search->ehnEquivalent; } do { if (temp == search) { numSiblings++; break; } if (temp) { temp = temp->ehnNext; } } while (temp); CORINFO_EH_CLAUSE clause; info.compCompHnd->getEHinfo(info.compMethodHnd, XTnum, &clause); if (clause.Flags & CORINFO_EH_CLAUSE_FILTER) { p3 = ehnNode++; noway_assert(p3->ehnTryNode == p1 || p3->ehnTryNode == p2); noway_assert(p1->ehnFilterNode == p3 || p2->ehnFilterNode == p3); if (p3->ehnStartOffset < p1->ehnStartOffset) { temp = p3; search = p1; } else if (p3->ehnStartOffset < p2->ehnStartOffset) { temp = p1; search = p3; } else { temp = p2; search = p3; } if (search->ehnEquivalent) { search = search->ehnEquivalent; } do { if (temp == search) { numSiblings++; break; } temp = temp->ehnNext; } while (temp); } else { numSiblings++; } if (numSiblings != 2) { BADCODE("Outer block does not contain all code in inner handler"); } } } #if defined(FEATURE_EH_FUNCLETS) #if defined(TARGET_ARM) /***************************************************************************** * We just removed a BBJ_CALLFINALLY/BBJ_ALWAYS pair. If this was the only such pair * targeting the BBJ_ALWAYS target, then we need to clear the BBF_FINALLY_TARGET bit * so that target can also be removed. 'block' is the finally target. Since we just * removed the BBJ_ALWAYS, it better have the BBF_FINALLY_TARGET bit set. */ void Compiler::fgClearFinallyTargetBit(BasicBlock* block) { assert(fgComputePredsDone); assert((block->bbFlags & BBF_FINALLY_TARGET) != 0); for (BasicBlock* const predBlock : block->PredBlocks()) { if (predBlock->bbJumpKind == BBJ_ALWAYS && predBlock->bbJumpDest == block) { BasicBlock* pPrev = predBlock->bbPrev; if (pPrev != nullptr) { if (pPrev->bbJumpKind == BBJ_CALLFINALLY) { // We found a BBJ_CALLFINALLY / BBJ_ALWAYS that still points to this finally target return; } } } } // Didn't find any BBJ_CALLFINALLY / BBJ_ALWAYS that still points here, so clear the bit block->bbFlags &= ~BBF_FINALLY_TARGET; } #endif // defined(TARGET_ARM) /***************************************************************************** * Is this an intra-handler control flow edge? * * 'block' is the head block of a funclet/handler region, or . * 'predBlock' is a predecessor block of 'block' in the predecessor list. * * 'predBlock' can legally only be one of three things: * 1. in the same handler region (e.g., the source of a back-edge of a loop from * 'predBlock' to 'block'), including in nested regions within the handler, * 2. if 'block' begins a handler that is a filter-handler, 'predBlock' must be in the 'filter' region, * 3. for other handlers, 'predBlock' must be in the 'try' region corresponding to handler (or any * region nested in the 'try' region). * * Note that on AMD64/ARM64, the BBJ_CALLFINALLY block that calls a finally handler is not * within the corresponding 'try' region: it is placed in the corresponding 'try' region's * parent (which might be the main function body). This is how it is represented to the VM * (with a special "cloned finally" EH table entry). * * Return 'true' for case #1, and 'false' otherwise. 
*/ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) { // Some simple preconditions (as stated above) assert(!fgFuncletsCreated); assert(fgGetPredForBlock(block, predBlock) != nullptr); assert(block->hasHndIndex()); EHblkDsc* xtab = ehGetDsc(block->getHndIndex()); #if FEATURE_EH_CALLFINALLY_THUNKS if (xtab->HasFinallyHandler()) { assert((xtab->ebdHndBeg == block) || // The normal case ((xtab->ebdHndBeg->bbNext == block) && (xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're // trying to decide how to split up the predecessor edges. if (predBlock->bbJumpKind == BBJ_CALLFINALLY) { assert(predBlock->bbJumpDest == block); // A BBJ_CALLFINALLY predecessor of the handler can only come from the corresponding try, // not from any EH clauses nested in this handler. However, we represent the BBJ_CALLFINALLY // as being in the 'try' region's parent EH region, which might be the main function body. unsigned tryIndex = xtab->ebdEnclosingTryIndex; if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { assert(!predBlock->hasTryIndex()); } else { assert(predBlock->hasTryIndex()); assert(tryIndex == predBlock->getTryIndex()); assert(ehGetDsc(tryIndex)->InTryRegionBBRange(predBlock)); } return false; } } #endif // FEATURE_EH_CALLFINALLY_THUNKS assert(predBlock->hasHndIndex() || predBlock->hasTryIndex()); // We could search the try region looking for predBlock by using bbInTryRegions // but that does a lexical search for the block, and then assumes funclets // have been created and does a lexical search of all funclets that were pulled // out of the parent try region. // First, funclets haven't been created yet, and even if they had been, we shouldn't // have any funclet directly branching to another funclet (they have to return first). // So we can safely use CheckIsTryRegion instead of bbInTryRegions. // Second, I believe the depth of any EH graph will on average be smaller than the // breadth of the blocks within a try body. Thus it is faster to get our answer by // looping outward over the region graph. However, I have added asserts, as a // precaution, to ensure both algorithms agree. The asserts also check that the only // way to reach the head of a funclet is from the corresponding try body or from // within the funclet (and *not* any nested funclets). if (predBlock->hasTryIndex()) { // Because the EH clauses are listed inside-out, any nested trys will be at a // lower index than the current try and if there's no enclosing try, tryIndex // will terminate at NO_ENCLOSING_INDEX unsigned tryIndex = predBlock->getTryIndex(); while (tryIndex < block->getHndIndex()) { tryIndex = ehGetEnclosingTryIndex(tryIndex); } // tryIndex should enclose predBlock assert((tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) || ehGetDsc(tryIndex)->InTryRegionBBRange(predBlock)); // At this point tryIndex is either block's handler's corresponding try body // or some outer try region that contains both predBlock & block or // NO_ENCLOSING_REGION (because there was no try body that encloses both). if (tryIndex == block->getHndIndex()) { assert(xtab->InTryRegionBBRange(predBlock)); assert(!xtab->InHndRegionBBRange(predBlock)); return false; } // tryIndex should enclose block (and predBlock as previously asserted) assert((tryIndex == EHblkDsc::NO_ENCLOSING_INDEX) || ehGetDsc(tryIndex)->InTryRegionBBRange(block)); } if (xtab->HasFilter()) { // The block is a handler. Check if the pred block is from its filter. 
We only need to // check the end filter flag, as there is only a single filter for any handler, and we // already know predBlock is a predecessor of block. if (predBlock->bbJumpKind == BBJ_EHFILTERRET) { assert(!xtab->InHndRegionBBRange(predBlock)); return false; } } // It is not in our try region (or filter), so it must be within this handler (or try bodies // within this handler) assert(!xtab->InTryRegionBBRange(predBlock)); assert(xtab->InHndRegionBBRange(predBlock)); return true; } /***************************************************************************** * Does this block, first block of a handler region, have any predecessor edges * that are not from its corresponding try region? */ bool Compiler::fgAnyIntraHandlerPreds(BasicBlock* block) { assert(block->hasHndIndex()); assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler for (BasicBlock* const predBlock : block->PredBlocks()) { if (fgIsIntraHandlerPred(predBlock, block)) { // We have a predecessor that is not from our try region return true; } } return false; } #else // !FEATURE_EH_FUNCLETS /***************************************************************************** * * Function called to relocate any and all EH regions. * Only entire consecutive EH regions will be moved and they will be kept together. * Except for the first block, the range can not have any blocks that jump into or out of the region. */ bool Compiler::fgRelocateEHRegions() { bool result = false; // Our return value #ifdef DEBUG if (verbose) printf("*************** In fgRelocateEHRegions()\n"); #endif if (fgCanRelocateEHRegions) { unsigned XTnum; EHblkDsc* HBtab; for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++) { // Nested EH regions cannot be moved. // Also we don't want to relocate an EH region that has a filter if ((HBtab->ebdHandlerNestingLevel == 0) && !HBtab->HasFilter()) { bool movedTry = false; #if DEBUG bool movedHnd = false; #endif // DEBUG // Only try to move the outermost try region if (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) { // Move the entire try region if it can be moved if (HBtab->ebdTryBeg->isRunRarely()) { BasicBlock* bTryLastBB = fgRelocateEHRange(XTnum, FG_RELOCATE_TRY); if (bTryLastBB != NULL) { result = true; movedTry = true; } } #if DEBUG if (verbose && movedTry) { printf("\nAfter relocating an EH try region"); fgDispBasicBlocks(); fgDispHandlerTab(); // Make sure that the predecessor lists are accurate if (expensiveDebugCheckLevel >= 2) { fgDebugCheckBBlist(); } } #endif // DEBUG } // Currently it is not good to move the rarely run handler regions to the end of the method // because fgDetermineFirstColdBlock() must put the start of any handler region in the hot // section. CLANG_FORMAT_COMMENT_ANCHOR; #if 0 // Now try to move the entire handler region if it can be moved. // Don't try to move a finally handler unless we already moved the try region. 
if (HBtab->ebdHndBeg->isRunRarely() && !HBtab->ebdHndBeg->hasTryIndex() && (movedTry || !HBtab->HasFinallyHandler())) { BasicBlock* bHndLastBB = fgRelocateEHRange(XTnum, FG_RELOCATE_HANDLER); if (bHndLastBB != NULL) { result = true; movedHnd = true; } } #endif // 0 #if DEBUG if (verbose && movedHnd) { printf("\nAfter relocating an EH handler region"); fgDispBasicBlocks(); fgDispHandlerTab(); // Make sure that the predecessor lists are accurate if (expensiveDebugCheckLevel >= 2) { fgDebugCheckBBlist(); } } #endif // DEBUG } } } #if DEBUG fgVerifyHandlerTab(); if (verbose && result) { printf("\nAfter fgRelocateEHRegions()"); fgDispBasicBlocks(); fgDispHandlerTab(); // Make sure that the predecessor lists are accurate fgDebugCheckBBlist(); } #endif // DEBUG return result; } #endif // !FEATURE_EH_FUNCLETS /***************************************************************************** * We've inserted a new block before 'block' that should be part of the same EH region as 'block'. * Update the EH table to make this so. Also, set the new block to have the right EH region data * (copy the bbTryIndex, bbHndIndex, and bbCatchTyp from 'block' to the new predecessor, and clear * 'bbCatchTyp' from 'block'). */ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) { assert(block->bbPrev != nullptr); BasicBlock* bPrev = block->bbPrev; bPrev->copyEHRegion(block); // The first block (and only the first block) of a handler has bbCatchTyp set bPrev->bbCatchTyp = block->bbCatchTyp; block->bbCatchTyp = BBCT_NONE; for (EHblkDsc* const HBtab : EHClauses(this)) { /* Multiple pointers in EHblkDsc can point to same block. We can not early out after the first match. */ if (HBtab->ebdTryBeg == block) { #ifdef DEBUG if (verbose) { printf("EH#%u: New first block of try: " FMT_BB "\n", ehGetIndex(HBtab), bPrev->bbNum); } #endif // DEBUG HBtab->ebdTryBeg = bPrev; bPrev->bbFlags |= BBF_TRY_BEG | BBF_DONT_REMOVE; // clear the TryBeg flag unless it begins another try region if (!bbIsTryBeg(block)) { block->bbFlags &= ~BBF_TRY_BEG; } } if (HBtab->ebdHndBeg == block) { #ifdef DEBUG if (verbose) { printf("EH#%u: New first block of handler: " FMT_BB "\n", ehGetIndex(HBtab), bPrev->bbNum); } #endif // DEBUG // The first block of a handler has an artificial extra refcount. Transfer that to the new block. noway_assert(block->countOfInEdges() > 0); block->bbRefs--; HBtab->ebdHndBeg = bPrev; bPrev->bbFlags |= BBF_DONT_REMOVE; #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { assert((block->bbFlags & BBF_FUNCLET_BEG) != 0); bPrev->bbFlags |= BBF_FUNCLET_BEG; block->bbFlags &= ~BBF_FUNCLET_BEG; } #endif // FEATURE_EH_FUNCLETS bPrev->bbRefs++; // If this is a handler for a filter, the last block of the filter will end with // a BBJ_EHFILTERRET block that has a bbJumpDest that jumps to the first block of // its handler. So we need to update it to keep things in sync. 
// if (HBtab->HasFilter()) { BasicBlock* bFilterLast = HBtab->BBFilterLast(); assert(bFilterLast != nullptr); assert(bFilterLast->bbJumpKind == BBJ_EHFILTERRET); assert(bFilterLast->bbJumpDest == block); #ifdef DEBUG if (verbose) { printf("EH#%u: Updating bbJumpDest for filter ret block: " FMT_BB " => " FMT_BB "\n", ehGetIndex(HBtab), bFilterLast->bbNum, bPrev->bbNum); } #endif // DEBUG // Change the bbJumpDest for bFilterLast from the old first 'block' to the new first 'bPrev' bFilterLast->bbJumpDest = bPrev; } } if (HBtab->HasFilter() && (HBtab->ebdFilter == block)) { #ifdef DEBUG if (verbose) { printf("EH#%u: New first block of filter: " FMT_BB "\n", ehGetIndex(HBtab), bPrev->bbNum); } #endif // DEBUG // The first block of a filter has an artificial extra refcount. Transfer that to the new block. noway_assert(block->countOfInEdges() > 0); block->bbRefs--; HBtab->ebdFilter = bPrev; bPrev->bbFlags |= BBF_DONT_REMOVE; #if defined(FEATURE_EH_FUNCLETS) if (fgFuncletsCreated) { assert((block->bbFlags & BBF_FUNCLET_BEG) != 0); bPrev->bbFlags |= BBF_FUNCLET_BEG; block->bbFlags &= ~BBF_FUNCLET_BEG; } #endif // FEATURE_EH_FUNCLETS bPrev->bbRefs++; } } } /***************************************************************************** * We've inserted a new block after 'block' that should be part of the same EH region as 'block'. * Update the EH table to make this so. Also, set the new block to have the right EH region data. */ void Compiler::fgExtendEHRegionAfter(BasicBlock* block) { BasicBlock* newBlk = block->bbNext; assert(newBlk != nullptr); newBlk->copyEHRegion(block); newBlk->bbCatchTyp = BBCT_NONE; // Only the first block of a catch has this set, and 'newBlk' can't be the first block of a catch. // TODO-Throughput: if the block is not in an EH region, then we don't need to walk the EH table looking for 'last' // block pointers to update. ehUpdateLastBlocks(block, newBlk); } //------------------------------------------------------------------------ // fgCheckEHCanInsertAfterBlock: Determine if a block can be inserted after // 'blk' and legally be put in the EH region specified by 'regionIndex'. This // can be true if the most nested region the block is in is already 'regionIndex', // as we'll just extend the most nested region (and any region ending at the same block). // It can also be true if it is the end of (a set of) EH regions, such that // inserting the block and properly extending some EH regions (if necessary) // puts the block in the correct region. We only consider the case of extending // an EH region after 'blk' (that is, to include 'blk' and the newly insert block); // we don't consider inserting a block as the the first block of an EH region following 'blk'. // // Consider this example: // // try3 try2 try1 // |--- | | BB01 // | |--- | BB02 // | | |--- BB03 // | | | BB04 // | |--- |--- BB05 // | BB06 // |----------------- BB07 // // Passing BB05 and try1/try2/try3 as the region to insert into (as well as putInTryRegion==true) // will all return 'true'. Here are the cases: // 1. Insert into try1: the most nested EH region BB05 is in is already try1, so we can insert after // it and extend try1 (and try2). // 2. Insert into try2: we can extend try2, but leave try1 alone. // 3. Insert into try3: we can leave try1 and try2 alone, and put the new block just in try3. Note that // in this case, after we "loop outwards" in the EH nesting, we get to a place where we're in the middle // of the try3 region, not at the end of it. 
// In all cases, it is possible to put a block after BB05 and put it in any of these three 'try' regions legally. // // Filters are ignored; if 'blk' is in a filter, the answer will be false. // // Arguments: // blk - the BasicBlock we are checking to see if we can insert after. // regionIndex - the EH region we want to insert a block into. regionIndex is // in the range [0..compHndBBtabCount]; 0 means "main method". // putInTryRegion - 'true' if the new block should be inserted in the 'try' region of 'regionIndex'. // For regionIndex 0 (the "main method"), this should be 'true'. // // Return Value: // 'true' if a block can be inserted after 'blk' and put in EH region 'regionIndex', else 'false'. // bool Compiler::fgCheckEHCanInsertAfterBlock(BasicBlock* blk, unsigned regionIndex, bool putInTryRegion) { assert(blk != nullptr); assert(regionIndex <= compHndBBtabCount); if (regionIndex == 0) { assert(putInTryRegion); } bool inTryRegion; unsigned nestedRegionIndex = ehGetMostNestedRegionIndex(blk, &inTryRegion); bool insertOK = true; for (;;) { if (nestedRegionIndex == regionIndex) { // This block is in the region we want to be in. We can insert here if it's the right type of region. // (If we want to be in the 'try' region, but the block is in the handler region, then inserting a // new block after 'blk' can't put it in the 'try' region, and vice-versa, since we only consider // extending regions after, not prepending to regions.) // This check will be 'true' if we are trying to put something in the main function (as putInTryRegion // must be 'true' if regionIndex is zero, and inTryRegion will also be 'true' if nestedRegionIndex is zero). insertOK = (putInTryRegion == inTryRegion); break; } else if (nestedRegionIndex == 0) { // The block is in the main function, but we want to put something in a nested region. We can't do that. insertOK = false; break; } assert(nestedRegionIndex > 0); EHblkDsc* ehDsc = ehGetDsc(nestedRegionIndex - 1); // ehGetDsc uses [0..compHndBBtabCount) form. if (inTryRegion) { if (blk != ehDsc->ebdTryLast) { // Not the last block? Then it must be somewhere else within the try region, so we can't insert here. insertOK = false; break; // exit the 'for' loop } } else { // We ignore filters. if (blk != ehDsc->ebdHndLast) { // Not the last block? Then it must be somewhere else within the handler region, so we can't insert // here. insertOK = false; break; // exit the 'for' loop } } // Things look good for this region; check the enclosing regions, if any. nestedRegionIndex = ehGetEnclosingRegionIndex(nestedRegionIndex - 1, &inTryRegion); // ehGetEnclosingRegionIndex uses [0..compHndBBtabCount) form. // Convert to [0..compHndBBtabCount] form. nestedRegionIndex = (nestedRegionIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : nestedRegionIndex + 1; } // end of for(;;) return insertOK; } //------------------------------------------------------------------------ // fgIsFirstBlockOfFilterOrHandler: return true if the given block is the first block of an EH handler // or filter. // // Arguments: // block - the BasicBlock in question // // Return Value: // As described above. // bool Compiler::fgIsFirstBlockOfFilterOrHandler(BasicBlock* block) { if (!block->hasHndIndex()) { return false; } EHblkDsc* ehDsc = ehGetDsc(block->getHndIndex()); if (ehDsc->ebdHndBeg == block) { return true; } if (ehDsc->HasFilter() && (ehDsc->ebdFilter == block)) { return true; } return false; }
1
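The case table in the verInsertEhNodeInTree comment above reduces to classifying how a new EH region's IL offset range relates to the root's range. The standalone C++ sketch below shows only that classification; Relation and Classify are invented names for illustration, not the JIT's EHNodeDsc machinery, and the Overlap outcome corresponds to the BADCODE paths above.

#include <cstdio>

// Possible relationships between a new region [nStart, nEnd] and the root region [rStart, rEnd].
enum class Relation { LeftSibling, RightSibling, Parent, Child, Equivalent, Overlap };

static Relation Classify(unsigned rStart, unsigned rEnd, unsigned nStart, unsigned nEnd)
{
    if (nEnd < rStart)                    return Relation::LeftSibling;  // case 1
    if (nStart > rEnd)                    return Relation::RightSibling; // case 9
    if (nStart == rStart && nEnd == rEnd) return Relation::Equivalent;   // case 13
    if (nStart <= rStart && nEnd >= rEnd) return Relation::Parent;       // cases 4-6
    if (nStart >= rStart && nEnd <= rEnd) return Relation::Child;        // cases 10-12
    return Relation::Overlap;                                            // cases 2, 3, 7, 8 -> BADCODE
}

int main()
{
    // Root region covers IL offsets [10, 20].
    printf("%d\n", (int)Classify(10, 20, 0, 5));   // LeftSibling
    printf("%d\n", (int)Classify(10, 20, 5, 15));  // Overlap (illegal in verifiable IL)
    printf("%d\n", (int)Classify(10, 20, 12, 18)); // Child
    return 0;
}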
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
./src/coreclr/gcdump/gcdump.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /***************************************************************************** * GCDump.cpp * * Defines functions to display the GCInfo as defined by the GC-encoding * spec. The GC information may be either dynamically created by a * Just-In-Time compiler conforming to the standard code-manager spec, * or may be persisted by a managed native code compiler conforming * to the standard code-manager spec. */ #ifndef TARGET_UNIX #include "utilcode.h" // For _ASSERTE() #endif //!TARGET_UNIX #include "gcdump.h" /*****************************************************************************/ GCDump::GCDump(UINT32 gcInfoVer, bool encBytes, unsigned maxEncBytes, bool dumpCodeOffs) : gcInfoVersion (gcInfoVer), fDumpEncBytes (encBytes ), cMaxEncBytes (maxEncBytes ), fDumpCodeOffsets(dumpCodeOffs) { // By default, use the standard printf function to dump GCDump::gcPrintf = (printfFtn) ::printf; } /***************************************************************************** * * Display the byte encodings for the given range of the GC tables. */ PTR_CBYTE GCDump::DumpEncoding(PTR_CBYTE gcInfoBlock, size_t cDumpBytes) { _ASSERTE((cDumpBytes >= 0) && (cMaxEncBytes < 256)); if (fDumpEncBytes) { PTR_CBYTE pCurPos; unsigned count; size_t cBytesLeft; for (count = cMaxEncBytes, cBytesLeft = cDumpBytes, pCurPos = gcInfoBlock; count > 0; count--, pCurPos++, cBytesLeft--) { if (cBytesLeft > 0) { if (cBytesLeft > 1 && count == 1) gcPrintf("..."); else gcPrintf("%02X ", *pCurPos); } else gcPrintf(" "); } gcPrintf("| "); } return gcInfoBlock + cDumpBytes; } /*****************************************************************************/ void GCDump::DumpOffset(unsigned o) { gcPrintf("%04X", o); } void GCDump::DumpOffsetEx(unsigned o) { if (fDumpCodeOffsets) DumpOffset(o); } /*****************************************************************************/
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /***************************************************************************** * GCDump.cpp * * Defines functions to display the GCInfo as defined by the GC-encoding * spec. The GC information may be either dynamically created by a * Just-In-Time compiler conforming to the standard code-manager spec, * or may be persisted by a managed native code compiler conforming * to the standard code-manager spec. */ #ifndef TARGET_UNIX #include "utilcode.h" // For _ASSERTE() #endif //!TARGET_UNIX #include "gcdump.h" /*****************************************************************************/ GCDump::GCDump(UINT32 gcInfoVer, bool encBytes, unsigned maxEncBytes, bool dumpCodeOffs) : gcInfoVersion (gcInfoVer), fDumpEncBytes (encBytes ), cMaxEncBytes (maxEncBytes ), fDumpCodeOffsets(dumpCodeOffs) { // By default, use the standard printf function to dump GCDump::gcPrintf = (printfFtn) ::printf; } /***************************************************************************** * * Display the byte encodings for the given range of the GC tables. */ PTR_CBYTE GCDump::DumpEncoding(PTR_CBYTE gcInfoBlock, size_t cDumpBytes) { _ASSERTE((cDumpBytes >= 0) && (cMaxEncBytes < 256)); if (fDumpEncBytes) { PTR_CBYTE pCurPos; unsigned count; size_t cBytesLeft; for (count = cMaxEncBytes, cBytesLeft = cDumpBytes, pCurPos = gcInfoBlock; count > 0; count--, pCurPos++, cBytesLeft--) { if (cBytesLeft > 0) { if (cBytesLeft > 1 && count == 1) gcPrintf("..."); else gcPrintf("%02X ", *pCurPos); } else gcPrintf(" "); } gcPrintf("| "); } return gcInfoBlock + cDumpBytes; } /*****************************************************************************/ void GCDump::DumpOffset(unsigned o) { gcPrintf("%04X", o); } void GCDump::DumpOffsetEx(unsigned o) { if (fDumpCodeOffsets) DumpOffset(o); } /*****************************************************************************/
-1
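The PR description carried in these rows outlines an adaptive placement policy for OSR patchpoints: place them at backedge sources when a loop has few backedges, otherwise fall back to backedge targets. The sketch below is only a hypothetical illustration of that heuristic; PatchpointStrategy, choosePatchpointStrategy, and kSourceThreshold are invented names, and the threshold value is an assumption rather than the JIT's actual policy or config knob.

#include <cstdio>

enum class PatchpointStrategy { BackedgeTargets, BackedgeSources };

// Assumed heuristic: with only a handful of backedges it is cheap to patchpoint
// each source block; with many backedges, patchpoint the shared target instead.
static PatchpointStrategy choosePatchpointStrategy(int backedgeCount)
{
    const int kSourceThreshold = 10; // illustrative cut-off, not a real config value
    return (backedgeCount <= kSourceThreshold) ? PatchpointStrategy::BackedgeSources
                                               : PatchpointStrategy::BackedgeTargets;
}

int main()
{
    printf("2 backedges  -> %s\n",
           choosePatchpointStrategy(2) == PatchpointStrategy::BackedgeSources ? "sources" : "targets");
    printf("50 backedges -> %s\n",
           choosePatchpointStrategy(50) == PatchpointStrategy::BackedgeSources ? "sources" : "targets");
    return 0;
}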
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
./src/coreclr/nativeaot/Runtime/rhassert.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __RHASSERT_H__ #define __RHASSERT_H__ #ifdef _MSC_VER #define ASSUME(expr) __assume(expr) #else // _MSC_VER #define ASSUME(expr) do { if (!(expr)) __builtin_unreachable(); } while (0) #endif // _MSC_VER #if defined(_DEBUG) && !defined(DACCESS_COMPILE) #define ASSERT(expr) \ { \ if (!(expr)) { Assert(#expr, __FILE__, __LINE__, NULL); } \ } \ #define ASSERT_MSG(expr, msg) \ { \ if (!(expr)) { Assert(#expr, __FILE__, __LINE__, msg); } \ } \ #define VERIFY(expr) ASSERT((expr)) #define ASSERT_UNCONDITIONALLY(message) \ Assert("ASSERT_UNCONDITIONALLY", __FILE__, __LINE__, message); \ void Assert(const char * expr, const char * file, unsigned int line_num, const char * message); #else #define ASSERT(expr) #define ASSERT_MSG(expr, msg) #define VERIFY(expr) (expr) #define ASSERT_UNCONDITIONALLY(message) #endif #ifndef _ASSERTE #define _ASSERTE(_expr) ASSERT(_expr) #endif #define PORTABILITY_ASSERT(message) \ ASSERT_UNCONDITIONALLY(message); \ ASSUME(0); \ #define UNREACHABLE() \ ASSERT_UNCONDITIONALLY("UNREACHABLE"); \ ASSUME(0); \ #define UNREACHABLE_MSG(message) \ ASSERT_UNCONDITIONALLY(message); \ ASSUME(0); \ #define FAIL_FAST_GENERATE_EXCEPTION_ADDRESS 0x1 #define RhFailFast() PalRaiseFailFastException(NULL, NULL, FAIL_FAST_GENERATE_EXCEPTION_ADDRESS) #endif // __RHASSERT_H__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __RHASSERT_H__ #define __RHASSERT_H__ #ifdef _MSC_VER #define ASSUME(expr) __assume(expr) #else // _MSC_VER #define ASSUME(expr) do { if (!(expr)) __builtin_unreachable(); } while (0) #endif // _MSC_VER #if defined(_DEBUG) && !defined(DACCESS_COMPILE) #define ASSERT(expr) \ { \ if (!(expr)) { Assert(#expr, __FILE__, __LINE__, NULL); } \ } \ #define ASSERT_MSG(expr, msg) \ { \ if (!(expr)) { Assert(#expr, __FILE__, __LINE__, msg); } \ } \ #define VERIFY(expr) ASSERT((expr)) #define ASSERT_UNCONDITIONALLY(message) \ Assert("ASSERT_UNCONDITIONALLY", __FILE__, __LINE__, message); \ void Assert(const char * expr, const char * file, unsigned int line_num, const char * message); #else #define ASSERT(expr) #define ASSERT_MSG(expr, msg) #define VERIFY(expr) (expr) #define ASSERT_UNCONDITIONALLY(message) #endif #ifndef _ASSERTE #define _ASSERTE(_expr) ASSERT(_expr) #endif #define PORTABILITY_ASSERT(message) \ ASSERT_UNCONDITIONALLY(message); \ ASSUME(0); \ #define UNREACHABLE() \ ASSERT_UNCONDITIONALLY("UNREACHABLE"); \ ASSUME(0); \ #define UNREACHABLE_MSG(message) \ ASSERT_UNCONDITIONALLY(message); \ ASSUME(0); \ #define FAIL_FAST_GENERATE_EXCEPTION_ADDRESS 0x1 #define RhFailFast() PalRaiseFailFastException(NULL, NULL, FAIL_FAST_GENERATE_EXCEPTION_ADDRESS) #endif // __RHASSERT_H__
-1
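rhassert.h above builds its UNREACHABLE-style macros on MSVC's __assume and GCC/Clang's __builtin_unreachable. The generic sketch below shows the usual usage pattern (MY_UNREACHABLE and ColorName are illustrative names, not runtime code): after a switch that handles every enumerator, marking the fall-through as unreachable typically silences the missing-return warning and lets the compiler drop the dead path.

#include <cstdlib>

#ifdef _MSC_VER
#define MY_UNREACHABLE() __assume(0)
#else
#define MY_UNREACHABLE() __builtin_unreachable()
#endif

enum class Color { Red, Green, Blue };

static const char* ColorName(Color c)
{
    switch (c)
    {
        case Color::Red:   return "red";
        case Color::Green: return "green";
        case Color::Blue:  return "blue";
    }
    MY_UNREACHABLE(); // every enumerator is handled above
}

int main()
{
    return ColorName(Color::Green)[0] == 'g' ? EXIT_SUCCESS : EXIT_FAILURE;
}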
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
./src/native/corehost/hostmisc/utils.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef UTILS_H #define UTILS_H #include "pal.h" #include "trace.h" #include <type_traits> #define _STRINGIFY(s) _X(s) #if defined(_WIN32) #define DOTNET_CORE_INSTALL_PREREQUISITES_URL _X("https://go.microsoft.com/fwlink/?linkid=798306") #elif defined(TARGET_OSX) #define DOTNET_CORE_INSTALL_PREREQUISITES_URL _X("https://go.microsoft.com/fwlink/?linkid=2063366") #else #define DOTNET_CORE_INSTALL_PREREQUISITES_URL _X("https://go.microsoft.com/fwlink/?linkid=2063370") #endif #define DOTNET_CORE_DOWNLOAD_URL _X("https://aka.ms/dotnet-download") #define DOTNET_CORE_APPLAUNCH_URL _X("https://aka.ms/dotnet-core-applaunch") #define RUNTIME_STORE_DIRECTORY_NAME _X("store") bool ends_with(const pal::string_t& value, const pal::string_t& suffix, bool match_case); bool starts_with(const pal::string_t& value, const pal::string_t& prefix, bool match_case); pal::string_t strip_executable_ext(const pal::string_t& filename); pal::string_t get_directory(const pal::string_t& path); pal::string_t strip_file_ext(const pal::string_t& path); pal::string_t get_filename(const pal::string_t& path); pal::string_t get_filename_without_ext(const pal::string_t& path); void append_path(pal::string_t* path1, const pal::char_t* path2); bool library_exists_in_dir(const pal::string_t& lib_dir, const pal::string_t& lib_name, pal::string_t* p_lib_path); bool coreclr_exists_in_dir(const pal::string_t& candidate); void remove_trailing_dir_seperator(pal::string_t* dir); void replace_char(pal::string_t* path, pal::char_t match, pal::char_t repl); pal::string_t get_replaced_char(const pal::string_t& path, pal::char_t match, pal::char_t repl); const pal::char_t* get_arch(); pal::string_t get_current_runtime_id(bool use_fallback); bool get_env_shared_store_dirs(std::vector<pal::string_t>* dirs, const pal::string_t& arch, const pal::string_t& tfm); bool get_global_shared_store_dirs(std::vector<pal::string_t>* dirs, const pal::string_t& arch, const pal::string_t& tfm); bool multilevel_lookup_enabled(); void get_framework_and_sdk_locations(const pal::string_t& dotnet_dir, std::vector<pal::string_t>* locations); bool get_file_path_from_env(const pal::char_t* env_key, pal::string_t* recv); size_t index_of_non_numeric(const pal::string_t& str, size_t i); bool try_stou(const pal::string_t& str, unsigned* num); bool get_dotnet_root_from_env(pal::string_t* used_dotnet_root_env_var_name, pal::string_t* recv); pal::string_t get_deps_from_app_binary(const pal::string_t& app_base, const pal::string_t& app); pal::string_t get_runtime_config_path(const pal::string_t& path, const pal::string_t& name); pal::string_t get_runtime_config_dev_path(const pal::string_t& path, const pal::string_t& name); void get_runtime_config_paths(const pal::string_t& path, const pal::string_t& name, pal::string_t* cfg, pal::string_t* dev_cfg); pal::string_t get_dotnet_root_from_fxr_path(const pal::string_t& fxr_path); // Get a download URL for a specific framework and version // If no framework is specified, a download URL for the runtime is returned pal::string_t get_download_url(const pal::char_t* framework_name = nullptr, const pal::char_t* framework_version = nullptr); pal::string_t to_lower(const pal::char_t* in); pal::string_t to_upper(const pal::char_t* in); // Retrieves environment variable which is only used for testing. 
// This will return the value of the variable only if the product binary is stamped // with test-only marker. bool test_only_getenv(const pal::char_t* name, pal::string_t* recv); // Helper class to make it easy to propagate error writer to the hostpolicy class propagate_error_writer_t { public: typedef trace::error_writer_fn(__cdecl* set_error_writer_fn)(trace::error_writer_fn error_writer); private: set_error_writer_fn m_set_error_writer; bool m_error_writer_set; public: propagate_error_writer_t(set_error_writer_fn set_error_writer) { // Previous trace messages from the caller module must be printed before calling trace::setup in callee module // The two modules have different trace util instances and thus don't share file IO buffers // Not flushing may lead to traces from before the call being written after the call due to module mismatch. trace::flush(); m_set_error_writer = set_error_writer; m_error_writer_set = false; trace::error_writer_fn error_writer = trace::get_error_writer(); if (error_writer != nullptr && m_set_error_writer != nullptr) { m_set_error_writer(error_writer); m_error_writer_set = true; } } ~propagate_error_writer_t() { if (m_error_writer_set && m_set_error_writer != nullptr) { m_set_error_writer(nullptr); m_error_writer_set = false; } } }; // Helper class to make it easy to change the error writer within a specific scope only. class error_writer_scope_t { private: trace::error_writer_fn m_old_error_writer; public: error_writer_scope_t(trace::error_writer_fn new_error_writer) { m_old_error_writer = trace::set_error_writer(new_error_writer); } ~error_writer_scope_t() { trace::set_error_writer(m_old_error_writer); } }; template<typename T> size_t to_size_t_dbgchecked(T value) { assert(value >= 0); size_t result = static_cast<size_t>(value); assert(static_cast<T>(result) == value); return result; } #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef UTILS_H #define UTILS_H #include "pal.h" #include "trace.h" #include <type_traits> #define _STRINGIFY(s) _X(s) #if defined(_WIN32) #define DOTNET_CORE_INSTALL_PREREQUISITES_URL _X("https://go.microsoft.com/fwlink/?linkid=798306") #elif defined(TARGET_OSX) #define DOTNET_CORE_INSTALL_PREREQUISITES_URL _X("https://go.microsoft.com/fwlink/?linkid=2063366") #else #define DOTNET_CORE_INSTALL_PREREQUISITES_URL _X("https://go.microsoft.com/fwlink/?linkid=2063370") #endif #define DOTNET_CORE_DOWNLOAD_URL _X("https://aka.ms/dotnet-download") #define DOTNET_CORE_APPLAUNCH_URL _X("https://aka.ms/dotnet-core-applaunch") #define RUNTIME_STORE_DIRECTORY_NAME _X("store") bool ends_with(const pal::string_t& value, const pal::string_t& suffix, bool match_case); bool starts_with(const pal::string_t& value, const pal::string_t& prefix, bool match_case); pal::string_t strip_executable_ext(const pal::string_t& filename); pal::string_t get_directory(const pal::string_t& path); pal::string_t strip_file_ext(const pal::string_t& path); pal::string_t get_filename(const pal::string_t& path); pal::string_t get_filename_without_ext(const pal::string_t& path); void append_path(pal::string_t* path1, const pal::char_t* path2); bool library_exists_in_dir(const pal::string_t& lib_dir, const pal::string_t& lib_name, pal::string_t* p_lib_path); bool coreclr_exists_in_dir(const pal::string_t& candidate); void remove_trailing_dir_seperator(pal::string_t* dir); void replace_char(pal::string_t* path, pal::char_t match, pal::char_t repl); pal::string_t get_replaced_char(const pal::string_t& path, pal::char_t match, pal::char_t repl); const pal::char_t* get_arch(); pal::string_t get_current_runtime_id(bool use_fallback); bool get_env_shared_store_dirs(std::vector<pal::string_t>* dirs, const pal::string_t& arch, const pal::string_t& tfm); bool get_global_shared_store_dirs(std::vector<pal::string_t>* dirs, const pal::string_t& arch, const pal::string_t& tfm); bool multilevel_lookup_enabled(); void get_framework_and_sdk_locations(const pal::string_t& dotnet_dir, std::vector<pal::string_t>* locations); bool get_file_path_from_env(const pal::char_t* env_key, pal::string_t* recv); size_t index_of_non_numeric(const pal::string_t& str, size_t i); bool try_stou(const pal::string_t& str, unsigned* num); bool get_dotnet_root_from_env(pal::string_t* used_dotnet_root_env_var_name, pal::string_t* recv); pal::string_t get_deps_from_app_binary(const pal::string_t& app_base, const pal::string_t& app); pal::string_t get_runtime_config_path(const pal::string_t& path, const pal::string_t& name); pal::string_t get_runtime_config_dev_path(const pal::string_t& path, const pal::string_t& name); void get_runtime_config_paths(const pal::string_t& path, const pal::string_t& name, pal::string_t* cfg, pal::string_t* dev_cfg); pal::string_t get_dotnet_root_from_fxr_path(const pal::string_t& fxr_path); // Get a download URL for a specific framework and version // If no framework is specified, a download URL for the runtime is returned pal::string_t get_download_url(const pal::char_t* framework_name = nullptr, const pal::char_t* framework_version = nullptr); pal::string_t to_lower(const pal::char_t* in); pal::string_t to_upper(const pal::char_t* in); // Retrieves environment variable which is only used for testing. 
// This will return the value of the variable only if the product binary is stamped // with test-only marker. bool test_only_getenv(const pal::char_t* name, pal::string_t* recv); // Helper class to make it easy to propagate error writer to the hostpolicy class propagate_error_writer_t { public: typedef trace::error_writer_fn(__cdecl* set_error_writer_fn)(trace::error_writer_fn error_writer); private: set_error_writer_fn m_set_error_writer; bool m_error_writer_set; public: propagate_error_writer_t(set_error_writer_fn set_error_writer) { // Previous trace messages from the caller module must be printed before calling trace::setup in callee module // The two modules have different trace util instances and thus don't share file IO buffers // Not flushing may lead to traces from before the call being written after the call due to module mismatch. trace::flush(); m_set_error_writer = set_error_writer; m_error_writer_set = false; trace::error_writer_fn error_writer = trace::get_error_writer(); if (error_writer != nullptr && m_set_error_writer != nullptr) { m_set_error_writer(error_writer); m_error_writer_set = true; } } ~propagate_error_writer_t() { if (m_error_writer_set && m_set_error_writer != nullptr) { m_set_error_writer(nullptr); m_error_writer_set = false; } } }; // Helper class to make it easy to change the error writer within a specific scope only. class error_writer_scope_t { private: trace::error_writer_fn m_old_error_writer; public: error_writer_scope_t(trace::error_writer_fn new_error_writer) { m_old_error_writer = trace::set_error_writer(new_error_writer); } ~error_writer_scope_t() { trace::set_error_writer(m_old_error_writer); } }; template<typename T> size_t to_size_t_dbgchecked(T value) { assert(value >= 0); size_t result = static_cast<size_t>(value); assert(static_cast<T>(result) == value); return result; } #endif
-1
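utils.h above uses a small RAII guard (error_writer_scope_t) to install an error writer for the duration of a scope and restore the previous one on exit. The standalone sketch below shows the same pattern in generic form; writer_scope, set_writer, and stderr_writer are made-up names for illustration.

#include <cstdio>

using writer_fn = void (*)(const char*);

static writer_fn g_writer = nullptr;

// Install a new writer and hand back the previous one so it can be restored.
static writer_fn set_writer(writer_fn w)
{
    writer_fn old = g_writer;
    g_writer = w;
    return old;
}

class writer_scope
{
    writer_fn m_old;
public:
    explicit writer_scope(writer_fn w) : m_old(set_writer(w)) {}
    ~writer_scope() { set_writer(m_old); }
};

static void stderr_writer(const char* msg) { fprintf(stderr, "%s\n", msg); }

int main()
{
    {
        writer_scope scope(stderr_writer); // writer is active only inside this block
        if (g_writer != nullptr)
            g_writer("inside scope");
    }
    // Here g_writer has been restored to nullptr by ~writer_scope.
    return 0;
}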
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
./src/coreclr/pal/tests/palsuite/file_io/SetFilePointer/test4/SetFilePointer.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: SetFilePointer.c (test 4) ** ** Purpose: Tests the PAL implementation of the SetFilePointer function. ** Test the FILE_END option ** ** Assumes Successful: ** CreateFile ** ReadFile ** WriteFile ** strlen ** CloseHandle ** strcmp ** GetFileSize ** ** **===================================================================*/ #include <palsuite.h> const char* szText = "The quick brown fox jumped over the lazy dog's back."; PALTEST(file_io_SetFilePointer_test4_paltest_setfilepointer_test4, "file_io/SetFilePointer/test4/paltest_setfilepointer_test4") { HANDLE hFile = NULL; DWORD dwByteCount = 0; DWORD dwOffset = 0; DWORD dwRc = 0; BOOL bRc = FALSE; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } /* create a test file */ hFile = CreateFile(szTextFile, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if(hFile == INVALID_HANDLE_VALUE) { Fail("SetFilePointer: ERROR -> Unable to create file \"%s\".\n", szTextFile); } bRc = WriteFile(hFile, szText, (DWORD)strlen(szText), &dwByteCount, NULL); if (bRc == FALSE) { Trace("SetFilePointer: ERROR -> Unable to write to file \"%s\".\n", szTextFile); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } /* * move -1 from the end */ dwRc = SetFilePointer(hFile, -1, NULL, FILE_END); if (dwRc == INVALID_SET_FILE_POINTER) { if (GetLastError() != ERROR_SUCCESS) { Trace("SetFilePointer: ERROR -> Failed to move the pointer " "back one character from EOF.\n"); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file" " \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file" " \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } } else { /* verify */ if ((dwRc != strlen(szText)-1)) { Trace("SetFilePointer: ERROR -> Failed to move the pointer" " -1 bytes from EOF\n"); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file" " \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file" " \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } } /* * move the file pointer 0 bytes from the end and verify */ dwRc = SetFilePointer(hFile, 0, NULL, FILE_END); if (dwRc != strlen(szText)) { Trace("SetFilePointer: ERROR -> Asked to move 0 bytes from the " "end of the file. Function returned %ld instead of 52.\n", dwRc); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } /* * move the pointer past the end of the file and verify */ dwRc = SetFilePointer(hFile, 20, NULL, FILE_END); if (dwRc != strlen(szText)+20) { Trace("SetFilePointer: ERROR -> Asked to move 20 bytes past the " "end of the file. 
Function returned %ld instead of %d.\n", dwRc, strlen(szText)+20); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } else { /* verify results */ bRc = SetEndOfFile(hFile); if ((dwRc = GetFileSize(hFile, NULL)) != strlen(szText)+20) { Trace("SetFilePointer: ERROR -> Asked to move back 20 bytes past" " theend of the file. GetFileSize returned %ld whereas it " "should have been %d.\n", dwRc, strlen(szText)+20); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file" " \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file" " \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } } /* * move the pointer backwards to before the start of the file and verify */ dwOffset = (dwRc + 20) * -1; dwRc = SetFilePointer(hFile, dwOffset, NULL, FILE_END); if ((dwRc != INVALID_SET_FILE_POINTER) || (GetLastError() == ERROR_SUCCESS)) { Trace("SetFilePointer: ERROR -> Was able to move the pointer " "to before the beginning of the file.\n"); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file \"%s\".\n", szTextFile); if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } if (!DeleteFileA(szTextFile)) { Fail("SetFilePointer: ERROR -> Unable to delete file \"%s\".\n", szTextFile); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: SetFilePointer.c (test 4) ** ** Purpose: Tests the PAL implementation of the SetFilePointer function. ** Test the FILE_END option ** ** Assumes Successful: ** CreateFile ** ReadFile ** WriteFile ** strlen ** CloseHandle ** strcmp ** GetFileSize ** ** **===================================================================*/ #include <palsuite.h> const char* szText = "The quick brown fox jumped over the lazy dog's back."; PALTEST(file_io_SetFilePointer_test4_paltest_setfilepointer_test4, "file_io/SetFilePointer/test4/paltest_setfilepointer_test4") { HANDLE hFile = NULL; DWORD dwByteCount = 0; DWORD dwOffset = 0; DWORD dwRc = 0; BOOL bRc = FALSE; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } /* create a test file */ hFile = CreateFile(szTextFile, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if(hFile == INVALID_HANDLE_VALUE) { Fail("SetFilePointer: ERROR -> Unable to create file \"%s\".\n", szTextFile); } bRc = WriteFile(hFile, szText, (DWORD)strlen(szText), &dwByteCount, NULL); if (bRc == FALSE) { Trace("SetFilePointer: ERROR -> Unable to write to file \"%s\".\n", szTextFile); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } /* * move -1 from the end */ dwRc = SetFilePointer(hFile, -1, NULL, FILE_END); if (dwRc == INVALID_SET_FILE_POINTER) { if (GetLastError() != ERROR_SUCCESS) { Trace("SetFilePointer: ERROR -> Failed to move the pointer " "back one character from EOF.\n"); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file" " \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file" " \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } } else { /* verify */ if ((dwRc != strlen(szText)-1)) { Trace("SetFilePointer: ERROR -> Failed to move the pointer" " -1 bytes from EOF\n"); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file" " \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file" " \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } } /* * move the file pointer 0 bytes from the end and verify */ dwRc = SetFilePointer(hFile, 0, NULL, FILE_END); if (dwRc != strlen(szText)) { Trace("SetFilePointer: ERROR -> Asked to move 0 bytes from the " "end of the file. Function returned %ld instead of 52.\n", dwRc); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } /* * move the pointer past the end of the file and verify */ dwRc = SetFilePointer(hFile, 20, NULL, FILE_END); if (dwRc != strlen(szText)+20) { Trace("SetFilePointer: ERROR -> Asked to move 20 bytes past the " "end of the file. 
Function returned %ld instead of %d.\n", dwRc, strlen(szText)+20); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } else { /* verify results */ bRc = SetEndOfFile(hFile); if ((dwRc = GetFileSize(hFile, NULL)) != strlen(szText)+20) { Trace("SetFilePointer: ERROR -> Asked to move back 20 bytes past" " theend of the file. GetFileSize returned %ld whereas it " "should have been %d.\n", dwRc, strlen(szText)+20); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file" " \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file" " \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } } /* * move the pointer backwards to before the start of the file and verify */ dwOffset = (dwRc + 20) * -1; dwRc = SetFilePointer(hFile, dwOffset, NULL, FILE_END); if ((dwRc != INVALID_SET_FILE_POINTER) || (GetLastError() == ERROR_SUCCESS)) { Trace("SetFilePointer: ERROR -> Was able to move the pointer " "to before the beginning of the file.\n"); if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file \"%s\".\n", szTextFile); } if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } if (CloseHandle(hFile) != TRUE) { Trace("SetFilePointer: ERROR -> Unable to close file \"%s\".\n", szTextFile); if (!DeleteFileA(szTextFile)) { Trace("SetFilePointer: ERROR -> Unable to delete file \"%s\".\n", szTextFile); } PAL_TerminateEx(FAIL); return FAIL; } if (!DeleteFileA(szTextFile)) { Fail("SetFilePointer: ERROR -> Unable to delete file \"%s\".\n", szTextFile); } PAL_Terminate(); return PASS; }
-1
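The PAL test above leans on a Win32 quirk: SetFilePointer returns INVALID_SET_FILE_POINTER both on failure and when the low 32 bits of a legitimate new position happen to be 0xFFFFFFFF, so GetLastError has to be consulted to tell the two apart. A small Windows-only sketch of that check pattern follows; SeekFromEnd and the temporary file name are illustrative.

#include <windows.h>
#include <cstdio>

static bool SeekFromEnd(HANDLE hFile, LONG distance, DWORD* newPos)
{
    SetLastError(ERROR_SUCCESS);
    DWORD pos = SetFilePointer(hFile, distance, NULL, FILE_END);
    if (pos == INVALID_SET_FILE_POINTER && GetLastError() != ERROR_SUCCESS)
    {
        return false; // genuine failure, e.g. seeking to before the start of the file
    }
    *newPos = pos;
    return true;
}

int main()
{
    HANDLE hFile = CreateFileA("seek_demo.tmp", GENERIC_READ | GENERIC_WRITE, 0, NULL,
                               CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
    if (hFile == INVALID_HANDLE_VALUE)
        return 1;

    DWORD pos = 0;
    // Seeking 20 bytes past the end is legal; the file is not extended until
    // data is written there or SetEndOfFile is called.
    if (SeekFromEnd(hFile, 20, &pos))
        printf("position after seek: %lu\n", (unsigned long)pos);

    CloseHandle(hFile);
    DeleteFileA("seek_demo.tmp");
    return 0;
}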
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
./src/coreclr/utilcode/configuration.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // -------------------------------------------------------------------------------------------------- // configuration.cpp // // // Access and update configuration values, falling back on legacy CLRConfig methods where necessary. // // -------------------------------------------------------------------------------------------------- #include "stdafx.h" #include "clrconfig.h" #include "configuration.h" LPCWSTR *knobNames = nullptr; LPCWSTR *knobValues = nullptr; int numberOfKnobs = 0; void Configuration::InitializeConfigurationKnobs(int numberOfConfigs, LPCWSTR *names, LPCWSTR *values) { numberOfKnobs = numberOfConfigs; // Neither should be null, or both should be null _ASSERT(!((names == nullptr) ^ (values == nullptr))); knobNames = names; knobValues = values; } static LPCWSTR GetConfigurationValue(LPCWSTR name) { _ASSERT(name != nullptr); if (name == nullptr || knobNames == nullptr || knobValues == nullptr) { return nullptr; } for (int i = 0; i < numberOfKnobs; ++i) { _ASSERT(knobNames[i] != nullptr); if (wcscmp(name, knobNames[i]) == 0) { return knobValues[i]; } } return nullptr; } DWORD Configuration::GetKnobDWORDValue(LPCWSTR name, const CLRConfig::ConfigDWORDInfo& dwordInfo) { bool returnedDefaultValue; DWORD legacyValue = CLRConfig::GetConfigValue(dwordInfo, &returnedDefaultValue); if (!returnedDefaultValue) { return legacyValue; } LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return wcstoul(knobValue, nullptr, 0); } return legacyValue; } DWORD Configuration::GetKnobDWORDValue(LPCWSTR name, DWORD defaultValue) { LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return wcstoul(knobValue, nullptr, 0); } return defaultValue; } ULONGLONG Configuration::GetKnobULONGLONGValue(LPCWSTR name, ULONGLONG defaultValue) { LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return _wcstoui64(knobValue, nullptr, 0); } return defaultValue; } LPCWSTR Configuration::GetKnobStringValue(LPCWSTR name, const CLRConfig::ConfigStringInfo& stringInfo) { LPCWSTR value = CLRConfig::GetConfigValue(stringInfo); if (value == nullptr) { value = GetConfigurationValue(name); } return value; } LPCWSTR Configuration::GetKnobStringValue(LPCWSTR name) { return GetConfigurationValue(name); } bool Configuration::GetKnobBooleanValue(LPCWSTR name, const CLRConfig::ConfigDWORDInfo& dwordInfo) { bool returnedDefaultValue; DWORD legacyValue = CLRConfig::GetConfigValue(dwordInfo, &returnedDefaultValue); if (!returnedDefaultValue) { return (legacyValue != 0); } LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return (wcscmp(knobValue, W("true")) == 0); } return (legacyValue != 0); } bool Configuration::GetKnobBooleanValue(LPCWSTR name, bool defaultValue) { LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return (wcscmp(knobValue, W("true")) == 0); } return defaultValue; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // -------------------------------------------------------------------------------------------------- // configuration.cpp // // // Access and update configuration values, falling back on legacy CLRConfig methods where necessary. // // -------------------------------------------------------------------------------------------------- #include "stdafx.h" #include "clrconfig.h" #include "configuration.h" LPCWSTR *knobNames = nullptr; LPCWSTR *knobValues = nullptr; int numberOfKnobs = 0; void Configuration::InitializeConfigurationKnobs(int numberOfConfigs, LPCWSTR *names, LPCWSTR *values) { numberOfKnobs = numberOfConfigs; // Neither should be null, or both should be null _ASSERT(!((names == nullptr) ^ (values == nullptr))); knobNames = names; knobValues = values; } static LPCWSTR GetConfigurationValue(LPCWSTR name) { _ASSERT(name != nullptr); if (name == nullptr || knobNames == nullptr || knobValues == nullptr) { return nullptr; } for (int i = 0; i < numberOfKnobs; ++i) { _ASSERT(knobNames[i] != nullptr); if (wcscmp(name, knobNames[i]) == 0) { return knobValues[i]; } } return nullptr; } DWORD Configuration::GetKnobDWORDValue(LPCWSTR name, const CLRConfig::ConfigDWORDInfo& dwordInfo) { bool returnedDefaultValue; DWORD legacyValue = CLRConfig::GetConfigValue(dwordInfo, &returnedDefaultValue); if (!returnedDefaultValue) { return legacyValue; } LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return wcstoul(knobValue, nullptr, 0); } return legacyValue; } DWORD Configuration::GetKnobDWORDValue(LPCWSTR name, DWORD defaultValue) { LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return wcstoul(knobValue, nullptr, 0); } return defaultValue; } ULONGLONG Configuration::GetKnobULONGLONGValue(LPCWSTR name, ULONGLONG defaultValue) { LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return _wcstoui64(knobValue, nullptr, 0); } return defaultValue; } LPCWSTR Configuration::GetKnobStringValue(LPCWSTR name, const CLRConfig::ConfigStringInfo& stringInfo) { LPCWSTR value = CLRConfig::GetConfigValue(stringInfo); if (value == nullptr) { value = GetConfigurationValue(name); } return value; } LPCWSTR Configuration::GetKnobStringValue(LPCWSTR name) { return GetConfigurationValue(name); } bool Configuration::GetKnobBooleanValue(LPCWSTR name, const CLRConfig::ConfigDWORDInfo& dwordInfo) { bool returnedDefaultValue; DWORD legacyValue = CLRConfig::GetConfigValue(dwordInfo, &returnedDefaultValue); if (!returnedDefaultValue) { return (legacyValue != 0); } LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return (wcscmp(knobValue, W("true")) == 0); } return (legacyValue != 0); } bool Configuration::GetKnobBooleanValue(LPCWSTR name, bool defaultValue) { LPCWSTR knobValue = GetConfigurationValue(name); if (knobValue != nullptr) { return (wcscmp(knobValue, W("true")) == 0); } return defaultValue; }
-1
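The configuration.cpp record above implements a lookup that prefers an explicitly set legacy CLRConfig value, then a runtimeconfig knob matched by name, then the default. Below is a small standalone sketch of just that precedence, assuming nothing about the real CLRConfig API: KnobTable, GetDword, and the example knob name are stand-ins, and the legacy default is folded into a single defaultValue parameter for brevity.

// Standalone sketch of the precedence above (not runtime code):
// explicit legacy setting -> runtimeconfig knob -> built-in default.
#include <cstdio>
#include <cwchar>
#include <map>
#include <string>

struct KnobTable
{
    std::map<std::wstring, std::wstring> knobs; // knob name -> string value

    unsigned long GetDword(const std::wstring& name,
                           bool legacyWasSet,
                           unsigned long legacyValue,
                           unsigned long defaultValue) const
    {
        if (legacyWasSet)                  // explicit legacy config wins
            return legacyValue;
        auto it = knobs.find(name);
        if (it != knobs.end())             // then the runtimeconfig knob
            return std::wcstoul(it->second.c_str(), nullptr, 0);
        return defaultValue;               // finally the built-in default
    }
};

int main()
{
    KnobTable table;
    table.knobs[L"System.GC.HeapCount"] = L"4"; // example knob name/value

    // Knob present, no legacy override -> 4; explicit legacy override -> 8.
    printf("%lu\n", table.GetDword(L"System.GC.HeapCount", false, 0, 1));
    printf("%lu\n", table.GetDword(L"System.GC.HeapCount", true, 8, 1));
    return 0;
}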
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/tests/Interop/PInvoke/Generics/GenericsNative.VectorB.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdio.h> #include <stdint.h> #include <xplatform.h> #include <platformdefines.h> typedef struct { bool e00; bool e01; bool e02; bool e03; bool e04; bool e05; bool e06; bool e07; bool e08; bool e09; bool e10; bool e11; bool e12; bool e13; bool e14; bool e15; } VectorB128; typedef struct { bool e00; bool e01; bool e02; bool e03; bool e04; bool e05; bool e06; bool e07; bool e08; bool e09; bool e10; bool e11; bool e12; bool e13; bool e14; bool e15; bool e16; bool e17; bool e18; bool e19; bool e20; bool e21; bool e22; bool e23; bool e24; bool e25; bool e26; bool e27; bool e28; bool e29; bool e30; bool e31; } VectorB256; static VectorB128 VectorB128Value = { }; static VectorB256 VectorB256Value = { }; extern "C" DLL_EXPORT VectorB128 STDMETHODCALLTYPE GetVectorB128(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15) { bool value[16] = { e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15 }; return *reinterpret_cast<VectorB128*>(value); } extern "C" DLL_EXPORT VectorB256 STDMETHODCALLTYPE GetVectorB256(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, bool e16, bool e17, bool e18, bool e19, bool e20, bool e21, bool e22, bool e23, bool e24, bool e25, bool e26, bool e27, bool e28, bool e29, bool e30, bool e31) { bool value[32] = { e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31 }; return *reinterpret_cast<VectorB256*>(value); } extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVectorB128Out(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, VectorB128* pValue) { *pValue = GetVectorB128(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15); } extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVectorB256Out(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, bool e16, bool e17, bool e18, bool e19, bool e20, bool e21, bool e22, bool e23, bool e24, bool e25, bool e26, bool e27, bool e28, bool e29, bool e30, bool e31, VectorB256* pValue) { *pValue = GetVectorB256(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31); } extern "C" DLL_EXPORT const VectorB128* STDMETHODCALLTYPE GetVectorB128Ptr(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15) { GetVectorB128Out(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, &VectorB128Value); return &VectorB128Value; } extern "C" DLL_EXPORT const VectorB256* STDMETHODCALLTYPE GetVectorB256Ptr(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, bool e16, bool e17, bool e18, bool e19, bool e20, bool e21, bool e22, bool e23, bool e24, bool e25, bool e26, bool e27, bool e28, bool e29, bool e30, 
bool e31) { GetVectorB256Out(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, &VectorB256Value); return &VectorB256Value; } extern "C" DLL_EXPORT VectorB128 STDMETHODCALLTYPE AddVectorB128(VectorB128 lhs, VectorB128 rhs) { throw "P/Invoke for Vector<char> should be unsupported."; } extern "C" DLL_EXPORT VectorB256 STDMETHODCALLTYPE AddVectorB256(VectorB256 lhs, VectorB256 rhs) { throw "P/Invoke for Vector<char> should be unsupported."; } extern "C" DLL_EXPORT VectorB128 STDMETHODCALLTYPE AddVectorB128s(const VectorB128* pValues, uint32_t count) { throw "P/Invoke for Vector<char> should be unsupported."; } extern "C" DLL_EXPORT VectorB256 STDMETHODCALLTYPE AddVectorB256s(const VectorB256* pValues, uint32_t count) { throw "P/Invoke for Vector<char> should be unsupported."; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdio.h> #include <stdint.h> #include <xplatform.h> #include <platformdefines.h> typedef struct { bool e00; bool e01; bool e02; bool e03; bool e04; bool e05; bool e06; bool e07; bool e08; bool e09; bool e10; bool e11; bool e12; bool e13; bool e14; bool e15; } VectorB128; typedef struct { bool e00; bool e01; bool e02; bool e03; bool e04; bool e05; bool e06; bool e07; bool e08; bool e09; bool e10; bool e11; bool e12; bool e13; bool e14; bool e15; bool e16; bool e17; bool e18; bool e19; bool e20; bool e21; bool e22; bool e23; bool e24; bool e25; bool e26; bool e27; bool e28; bool e29; bool e30; bool e31; } VectorB256; static VectorB128 VectorB128Value = { }; static VectorB256 VectorB256Value = { }; extern "C" DLL_EXPORT VectorB128 STDMETHODCALLTYPE GetVectorB128(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15) { bool value[16] = { e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15 }; return *reinterpret_cast<VectorB128*>(value); } extern "C" DLL_EXPORT VectorB256 STDMETHODCALLTYPE GetVectorB256(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, bool e16, bool e17, bool e18, bool e19, bool e20, bool e21, bool e22, bool e23, bool e24, bool e25, bool e26, bool e27, bool e28, bool e29, bool e30, bool e31) { bool value[32] = { e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31 }; return *reinterpret_cast<VectorB256*>(value); } extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVectorB128Out(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, VectorB128* pValue) { *pValue = GetVectorB128(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15); } extern "C" DLL_EXPORT void STDMETHODCALLTYPE GetVectorB256Out(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, bool e16, bool e17, bool e18, bool e19, bool e20, bool e21, bool e22, bool e23, bool e24, bool e25, bool e26, bool e27, bool e28, bool e29, bool e30, bool e31, VectorB256* pValue) { *pValue = GetVectorB256(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31); } extern "C" DLL_EXPORT const VectorB128* STDMETHODCALLTYPE GetVectorB128Ptr(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15) { GetVectorB128Out(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, &VectorB128Value); return &VectorB128Value; } extern "C" DLL_EXPORT const VectorB256* STDMETHODCALLTYPE GetVectorB256Ptr(bool e00, bool e01, bool e02, bool e03, bool e04, bool e05, bool e06, bool e07, bool e08, bool e09, bool e10, bool e11, bool e12, bool e13, bool e14, bool e15, bool e16, bool e17, bool e18, bool e19, bool e20, bool e21, bool e22, bool e23, bool e24, bool e25, bool e26, bool e27, bool e28, bool e29, bool e30, 
bool e31) { GetVectorB256Out(e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, &VectorB256Value); return &VectorB256Value; } extern "C" DLL_EXPORT VectorB128 STDMETHODCALLTYPE AddVectorB128(VectorB128 lhs, VectorB128 rhs) { throw "P/Invoke for Vector<char> should be unsupported."; } extern "C" DLL_EXPORT VectorB256 STDMETHODCALLTYPE AddVectorB256(VectorB256 lhs, VectorB256 rhs) { throw "P/Invoke for Vector<char> should be unsupported."; } extern "C" DLL_EXPORT VectorB128 STDMETHODCALLTYPE AddVectorB128s(const VectorB128* pValues, uint32_t count) { throw "P/Invoke for Vector<char> should be unsupported."; } extern "C" DLL_EXPORT VectorB256 STDMETHODCALLTYPE AddVectorB256s(const VectorB256* pValues, uint32_t count) { throw "P/Invoke for Vector<char> should be unsupported."; }
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/pal/tests/palsuite/filemapping_memmgt/GetModuleFileNameW/test1/GetModuleFileNameW.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================= ** ** Source: getmodulefilenamew.c ** ** Purpose: Test the GetModuleFileNameW to retrieve the specified module ** full path and file name in UNICODE. ** ** **============================================================*/ #define UNICODE #include <palsuite.h> #define MODULENAMEBUFFERSIZE 1024 /* SHLEXT is defined only for Unix variants */ #if defined(SHLEXT) #define ModuleName "librotor_pal"SHLEXT #define Delimiter "/" #else #define ModuleName "rotor_pal.dll" #define Delimiter "\\" #endif PALTEST(filemapping_memmgt_GetModuleFileNameW_test1_paltest_getmodulefilenamew_test1, "filemapping_memmgt/GetModuleFileNameW/test1/paltest_getmodulefilenamew_test1") { HMODULE ModuleHandle; int err; WCHAR *lpModuleName; DWORD ModuleNameLength; WCHAR *ModuleFileNameBuf; char* TempBuf = NULL; char* LastBuf = NULL; char NewModuleFileNameBuf[MODULENAMEBUFFERSIZE+200] = ""; //Initialize the PAL environment err = PAL_Initialize(argc, argv); if(0 != err) { ExitProcess(FAIL); } ModuleFileNameBuf = (WCHAR*)malloc(MODULENAMEBUFFERSIZE*sizeof(WCHAR)); //convert a normal string to a wide one lpModuleName = convert(ModuleName); //load a module ModuleHandle = LoadLibrary(lpModuleName); //free the memory free(lpModuleName); if(!ModuleHandle) { Fail("Failed to call LoadLibrary API!\n"); } //retrieve the specified module full path and file name ModuleNameLength = GetModuleFileName( ModuleHandle,//specified module handle ModuleFileNameBuf,//buffer for module file name MODULENAMEBUFFERSIZE); //convert a wide full path name to a normal one strcpy(NewModuleFileNameBuf,convertC(ModuleFileNameBuf)); //strip out all full path TempBuf = strtok(NewModuleFileNameBuf,Delimiter); LastBuf = TempBuf; while(NULL != TempBuf) { LastBuf = TempBuf; TempBuf = strtok(NULL,Delimiter); } //free the memory free(ModuleFileNameBuf); if(0 == ModuleNameLength || strcmp(ModuleName,LastBuf)) { Trace("\nFailed to all GetModuleFileName API!\n"); err = FreeLibrary(ModuleHandle); if(0 == err) { Fail("\nFailed to all FreeLibrary API!\n"); } Fail(""); } //decrement the reference count of the loaded dll err = FreeLibrary(ModuleHandle); if(0 == err) { Fail("\nFailed to all FreeLibrary API!\n"); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================= ** ** Source: getmodulefilenamew.c ** ** Purpose: Test the GetModuleFileNameW to retrieve the specified module ** full path and file name in UNICODE. ** ** **============================================================*/ #define UNICODE #include <palsuite.h> #define MODULENAMEBUFFERSIZE 1024 /* SHLEXT is defined only for Unix variants */ #if defined(SHLEXT) #define ModuleName "librotor_pal"SHLEXT #define Delimiter "/" #else #define ModuleName "rotor_pal.dll" #define Delimiter "\\" #endif PALTEST(filemapping_memmgt_GetModuleFileNameW_test1_paltest_getmodulefilenamew_test1, "filemapping_memmgt/GetModuleFileNameW/test1/paltest_getmodulefilenamew_test1") { HMODULE ModuleHandle; int err; WCHAR *lpModuleName; DWORD ModuleNameLength; WCHAR *ModuleFileNameBuf; char* TempBuf = NULL; char* LastBuf = NULL; char NewModuleFileNameBuf[MODULENAMEBUFFERSIZE+200] = ""; //Initialize the PAL environment err = PAL_Initialize(argc, argv); if(0 != err) { ExitProcess(FAIL); } ModuleFileNameBuf = (WCHAR*)malloc(MODULENAMEBUFFERSIZE*sizeof(WCHAR)); //convert a normal string to a wide one lpModuleName = convert(ModuleName); //load a module ModuleHandle = LoadLibrary(lpModuleName); //free the memory free(lpModuleName); if(!ModuleHandle) { Fail("Failed to call LoadLibrary API!\n"); } //retrieve the specified module full path and file name ModuleNameLength = GetModuleFileName( ModuleHandle,//specified module handle ModuleFileNameBuf,//buffer for module file name MODULENAMEBUFFERSIZE); //convert a wide full path name to a normal one strcpy(NewModuleFileNameBuf,convertC(ModuleFileNameBuf)); //strip out all full path TempBuf = strtok(NewModuleFileNameBuf,Delimiter); LastBuf = TempBuf; while(NULL != TempBuf) { LastBuf = TempBuf; TempBuf = strtok(NULL,Delimiter); } //free the memory free(ModuleFileNameBuf); if(0 == ModuleNameLength || strcmp(ModuleName,LastBuf)) { Trace("\nFailed to all GetModuleFileName API!\n"); err = FreeLibrary(ModuleHandle); if(0 == err) { Fail("\nFailed to all FreeLibrary API!\n"); } Fail(""); } //decrement the reference count of the loaded dll err = FreeLibrary(ModuleHandle); if(0 == err) { Fail("\nFailed to all FreeLibrary API!\n"); } PAL_Terminate(); return PASS; }
-1
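The PAL test record above exercises GetModuleFileNameW by stripping the returned path down to its last component with strtok and comparing that against the expected module name. The sketch below reproduces only that path-stripping step in portable C++; LastPathComponent and the sample path are illustrative, and no PAL or Win32 calls are made.

// Portable sketch of the path-stripping idiom used by the test above:
// strtok over the delimiter keeps only the final path component.
#include <cstdio>
#include <cstring>

// Returns a pointer to the last path component inside 'path' (modified in place).
static char* LastPathComponent(char* path, const char* delimiter)
{
    char* last = nullptr;
    for (char* tok = strtok(path, delimiter); tok != nullptr; tok = strtok(nullptr, delimiter))
    {
        last = tok;
    }
    return last;
}

int main()
{
    char path[] = "/usr/lib/librotor_pal.so";
    const char* name = LastPathComponent(path, "/");
    // Mirrors the test's final check: a non-empty result that matches the module name.
    if (name != nullptr && strcmp(name, "librotor_pal.so") == 0)
    {
        printf("module file name check passed: %s\n", name);
        return 0;
    }
    printf("check failed\n");
    return 1;
}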
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/mono/mono/utils/mono-utility-thread.h
/** * \file * A lightweight worker thread with lockless messaging * * Author: * Alexander Kyte ([email protected]) * * (C) 2018 Microsoft, Inc. * */ #ifndef __MONO_UTILITY_THREAD_H__ #define __MONO_UTILITY_THREAD_H__ #include <glib.h> #include <mono/utils/mono-threads.h> #include <mono/utils/lock-free-queue.h> #include <mono/utils/lock-free-alloc.h> #include <mono/utils/mono-os-semaphore.h> #define MONO_PRINT_DROPPED_MESSAGES 0 typedef struct { void (*early_init) (gpointer *state_ptr); void (*init) (gpointer *state_ptr); void (*command) (gpointer state_ptr, gpointer message_ptr, gboolean at_shutdown); void (*cleanup) (gpointer state_ptr); } MonoUtilityThreadCallbacks; typedef struct { MonoNativeThreadId thread_id; MonoLockFreeQueue work_queue; MonoSemType work_queue_sem; gboolean run_thread; MonoLockFreeAllocator message_allocator; MonoLockFreeAllocSizeClass message_size_class; size_t message_block_size; size_t payload_size; gpointer state_ptr; MonoUtilityThreadCallbacks callbacks; } MonoUtilityThread; MonoUtilityThread * mono_utility_thread_launch (size_t payload_size, MonoUtilityThreadCallbacks *callbacks, MonoMemAccountType accountType); void mono_utility_thread_send (MonoUtilityThread *thread, gpointer message); gboolean mono_utility_thread_send_sync (MonoUtilityThread *thread, gpointer message); void mono_utility_thread_stop (MonoUtilityThread *thread); #endif /* __MONO_UTILITY_THREAD_H__ */
/** * \file * A lightweight worker thread with lockless messaging * * Author: * Alexander Kyte ([email protected]) * * (C) 2018 Microsoft, Inc. * */ #ifndef __MONO_UTILITY_THREAD_H__ #define __MONO_UTILITY_THREAD_H__ #include <glib.h> #include <mono/utils/mono-threads.h> #include <mono/utils/lock-free-queue.h> #include <mono/utils/lock-free-alloc.h> #include <mono/utils/mono-os-semaphore.h> #define MONO_PRINT_DROPPED_MESSAGES 0 typedef struct { void (*early_init) (gpointer *state_ptr); void (*init) (gpointer *state_ptr); void (*command) (gpointer state_ptr, gpointer message_ptr, gboolean at_shutdown); void (*cleanup) (gpointer state_ptr); } MonoUtilityThreadCallbacks; typedef struct { MonoNativeThreadId thread_id; MonoLockFreeQueue work_queue; MonoSemType work_queue_sem; gboolean run_thread; MonoLockFreeAllocator message_allocator; MonoLockFreeAllocSizeClass message_size_class; size_t message_block_size; size_t payload_size; gpointer state_ptr; MonoUtilityThreadCallbacks callbacks; } MonoUtilityThread; MonoUtilityThread * mono_utility_thread_launch (size_t payload_size, MonoUtilityThreadCallbacks *callbacks, MonoMemAccountType accountType); void mono_utility_thread_send (MonoUtilityThread *thread, gpointer message); gboolean mono_utility_thread_send_sync (MonoUtilityThread *thread, gpointer message); void mono_utility_thread_stop (MonoUtilityThread *thread); #endif /* __MONO_UTILITY_THREAD_H__ */
-1
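mono-utility-thread.h above describes a dedicated worker that drains a message queue and hands each payload to a command callback, with launch, send, and stop entry points. The sketch below is a portable analogue of that pattern only, not Mono code: it uses std::thread and a mutex-guarded queue where Mono's version is lock-free, and UtilityThread, Send, and Stop are invented names.

// Portable analogue (not Mono code) of the message-pump pattern: a worker
// thread drains queued payloads, runs a callback on each, and Stop() drains
// the remaining messages before joining.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>

struct UtilityThread
{
    std::thread worker;
    std::mutex lock;
    std::condition_variable cv;
    std::queue<int> messages;   // the payload is just an int in this sketch
    bool running = true;

    explicit UtilityThread(void (*command)(int))
    {
        worker = std::thread([this, command] {
            std::unique_lock<std::mutex> guard(lock);
            while (running || !messages.empty())
            {
                if (messages.empty())
                {
                    cv.wait(guard);
                    continue;
                }
                int msg = messages.front();
                messages.pop();
                guard.unlock();
                command(msg);    // run the callback outside the lock
                guard.lock();
            }
        });
    }

    void Send(int msg)
    {
        std::lock_guard<std::mutex> guard(lock);
        messages.push(msg);
        cv.notify_one();
    }

    void Stop()
    {
        {
            std::lock_guard<std::mutex> guard(lock);
            running = false;
            cv.notify_one();
        }
        worker.join();
    }
};

int main()
{
    UtilityThread t([](int msg) { printf("processed message %d\n", msg); });
    t.Send(1);
    t.Send(2);
    t.Stop();   // drains any remaining messages, then joins the worker
    return 0;
}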
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/inc/debugmacros.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // DebugMacros.h // // Wrappers for Debugging purposes. // //***************************************************************************** #ifndef __DebugMacros_h__ #define __DebugMacros_h__ #include "stacktrace.h" #include "debugmacrosext.h" #include "palclr.h" #undef _ASSERTE #undef VERIFY #ifdef __cplusplus extern "C" { #endif // __cplusplus #if defined(_DEBUG) class SString; bool GetStackTraceAtContext(SString & s, struct _CONTEXT * pContext); void _cdecl DbgWriteEx(LPCTSTR szFmt, ...); bool _DbgBreakCheck(LPCSTR szFile, int iLine, LPCSTR szExpr, BOOL fConstrained = FALSE); extern VOID ANALYZER_NORETURN DbgAssertDialog(const char *szFile, int iLine, const char *szExpr); #define TRACE_BUFF_SIZE (cchMaxAssertStackLevelStringLen * cfrMaxAssertStackLevels + cchMaxAssertExprLen + 1) extern char g_szExprWithStack[TRACE_BUFF_SIZE]; extern int _DbgBreakCount; #define PRE_ASSERTE /* if you need to change modes before doing asserts override */ #define POST_ASSERTE /* put it back */ #if !defined(_ASSERTE_MSG) #define _ASSERTE_MSG(expr, msg) \ do { \ if (!(expr)) { \ PRE_ASSERTE \ DbgAssertDialog(__FILE__, __LINE__, msg); \ POST_ASSERTE \ } \ } while (0) #endif // _ASSERTE_MSG #if !defined(_ASSERTE) #define _ASSERTE(expr) _ASSERTE_MSG(expr, #expr) #endif // !_ASSERTE #define VERIFY(stmt) _ASSERTE((stmt)) #define _ASSERTE_ALL_BUILDS(file, expr) _ASSERTE((expr)) #define FreeBuildDebugBreak() DebugBreak() #else // !_DEBUG #define _DbgBreakCount 0 #define _ASSERTE(expr) ((void)0) #define _ASSERTE_MSG(expr, msg) ((void)0) #define VERIFY(stmt) (void)(stmt) void __FreeBuildDebugBreak(); void DECLSPEC_NORETURN __FreeBuildAssertFail(const char *szFile, int iLine, const char *szExpr); #define FreeBuildDebugBreak() __FreeBuildDebugBreak() // At this point, EEPOLICY_HANDLE_FATAL_ERROR may or may not be defined. It will be defined // if we are building the VM folder, but outside VM, its not necessarily defined. // // Thus, if EEPOLICY_HANDLE_FATAL_ERROR is not defined, we will call into __FreeBuildAssertFail, // but if it is defined, we will use it. // // Failing here implies an error in the runtime - hence we use COR_E_EXECUTIONENGINE. #ifdef EEPOLICY_HANDLE_FATAL_ERROR #define _ASSERTE_ALL_BUILDS(file, expr) if (!(expr)) EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); #else // !EEPOLICY_HANDLE_FATAL_ERROR #define _ASSERTE_ALL_BUILDS(file, expr) if (!(expr)) __FreeBuildAssertFail(file, __LINE__, #expr); #endif // EEPOLICY_HANDLE_FATAL_ERROR #endif #define ASSERT_AND_CHECK(x) { \ BOOL bResult = x; \ if (!bResult) \ { \ _ASSERTE(x); \ return FALSE; \ } \ } #ifdef _DEBUG_IMPL // A macro to execute a statement only in _DEBUG_IMPL. 
#define DEBUG_IMPL_STMT(stmt) stmt #define _ASSERTE_IMPL(expr) _ASSERTE((expr)) #if defined(_M_IX86) #if defined(_MSC_VER) #define _DbgBreak() __asm { int 3 } #elif defined(__GNUC__) #define _DbgBreak() __asm__ ("int $3"); #else #error Unknown compiler #endif #else #define _DbgBreak() DebugBreak() #endif extern VOID DebBreak(); extern VOID DebBreakHr(HRESULT hr); #ifndef IfFailGoto #define IfFailGoto(EXPR, LABEL) \ do { hr = (EXPR); if(FAILED(hr)) { DebBreakHr(hr); goto LABEL; } } while (0) #endif #ifndef IfFailRet #define IfFailRet(EXPR) \ do { hr = (EXPR); if(FAILED(hr)) { DebBreakHr(hr); return (hr); } } while (0) #endif #ifndef IfFailWin32Ret #define IfFailWin32Ret(EXPR) \ do { hr = (EXPR); if(hr != ERROR_SUCCESS) { hr = HRESULT_FROM_WIN32(hr); DebBreakHr(hr); return hr;} } while (0) #endif #ifndef IfFailWin32Goto #define IfFailWin32Goto(EXPR, LABEL) \ do { hr = (EXPR); if(hr != ERROR_SUCCESS) { hr = HRESULT_FROM_WIN32(hr); DebBreakHr(hr); goto LABEL; } } while (0) #endif #ifndef IfFailGo #define IfFailGo(EXPR) IfFailGoto(EXPR, ErrExit) #endif #ifndef IfFailWin32Go #define IfFailWin32Go(EXPR) IfFailWin32Goto(EXPR, ErrExit) #endif #else // _DEBUG_IMPL #define _DbgBreak() {} #define DEBUG_IMPL_STMT(stmt) #define _ASSERTE_IMPL(expr) #define IfFailGoto(EXPR, LABEL) \ do { hr = (EXPR); if(FAILED(hr)) { goto LABEL; } } while (0) #define IfFailRet(EXPR) \ do { hr = (EXPR); if(FAILED(hr)) { return (hr); } } while (0) #define IfFailWin32Ret(EXPR) \ do { hr = (EXPR); if(hr != ERROR_SUCCESS) { hr = HRESULT_FROM_WIN32(hr); return hr;} } while (0) #define IfFailWin32Goto(EXPR, LABEL) \ do { hr = (EXPR); if(hr != ERROR_SUCCESS) { hr = HRESULT_FROM_WIN32(hr); goto LABEL; } } while (0) #define IfFailGo(EXPR) IfFailGoto(EXPR, ErrExit) #define IfFailWin32Go(EXPR) IfFailWin32Goto(EXPR, ErrExit) #endif // _DEBUG_IMPL #define IfNullGoto(EXPR, LABEL) \ do { if ((EXPR) == NULL) { OutOfMemory(); IfFailGoto(E_OUTOFMEMORY, LABEL); } } while (false) #ifndef IfNullRet #define IfNullRet(EXPR) \ do { if ((EXPR) == NULL) { OutOfMemory(); return E_OUTOFMEMORY; } } while (false) #endif //!IfNullRet #define IfNullGo(EXPR) IfNullGoto(EXPR, ErrExit) #ifdef __cplusplus } #endif // __cplusplus #undef assert #define assert _ASSERTE #undef _ASSERT #define _ASSERT _ASSERTE #if defined(_DEBUG) && defined(HOST_WINDOWS) // This function returns the EXE time stamp (effectively a random number) // Under retail it always returns 0. This is meant to be used in the // RandomOnExe macro unsigned DbgGetEXETimeStamp(); // returns true 'fractionOn' amount of the time using the EXE timestamp // as the random number seed. For example DbgRandomOnExe(.1) returns true 1/10 // of the time. We use the line number so that different uses of DbgRandomOnExe // will not be coorelated with each other (9973 is prime). Returns false on a retail build #define DbgRandomOnHashAndExe(hash, fractionOn) \ (((DbgGetEXETimeStamp() * __LINE__ * ((hash) ? (hash) : 1)) % 9973) < \ unsigned((fractionOn) * 9973)) #define DbgRandomOnExe(fractionOn) DbgRandomOnHashAndExe(0, fractionOn) #define DbgRandomOnStringAndExe(string, fractionOn) DbgRandomOnHashAndExe(HashStringA(string), fractionOn) #else #define DbgGetEXETimeStamp() 0 #define DbgRandomOnHashAndExe(hash, fractionOn) 0 #define DbgRandomOnExe(fractionOn) 0 #define DbgRandomOnStringAndExe(fractionOn) 0 #endif // _DEBUG && !FEATUREPAL #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // DebugMacros.h // // Wrappers for Debugging purposes. // //***************************************************************************** #ifndef __DebugMacros_h__ #define __DebugMacros_h__ #include "stacktrace.h" #include "debugmacrosext.h" #include "palclr.h" #undef _ASSERTE #undef VERIFY #ifdef __cplusplus extern "C" { #endif // __cplusplus #if defined(_DEBUG) class SString; bool GetStackTraceAtContext(SString & s, struct _CONTEXT * pContext); void _cdecl DbgWriteEx(LPCTSTR szFmt, ...); bool _DbgBreakCheck(LPCSTR szFile, int iLine, LPCSTR szExpr, BOOL fConstrained = FALSE); extern VOID ANALYZER_NORETURN DbgAssertDialog(const char *szFile, int iLine, const char *szExpr); #define TRACE_BUFF_SIZE (cchMaxAssertStackLevelStringLen * cfrMaxAssertStackLevels + cchMaxAssertExprLen + 1) extern char g_szExprWithStack[TRACE_BUFF_SIZE]; extern int _DbgBreakCount; #define PRE_ASSERTE /* if you need to change modes before doing asserts override */ #define POST_ASSERTE /* put it back */ #if !defined(_ASSERTE_MSG) #define _ASSERTE_MSG(expr, msg) \ do { \ if (!(expr)) { \ PRE_ASSERTE \ DbgAssertDialog(__FILE__, __LINE__, msg); \ POST_ASSERTE \ } \ } while (0) #endif // _ASSERTE_MSG #if !defined(_ASSERTE) #define _ASSERTE(expr) _ASSERTE_MSG(expr, #expr) #endif // !_ASSERTE #define VERIFY(stmt) _ASSERTE((stmt)) #define _ASSERTE_ALL_BUILDS(file, expr) _ASSERTE((expr)) #define FreeBuildDebugBreak() DebugBreak() #else // !_DEBUG #define _DbgBreakCount 0 #define _ASSERTE(expr) ((void)0) #define _ASSERTE_MSG(expr, msg) ((void)0) #define VERIFY(stmt) (void)(stmt) void __FreeBuildDebugBreak(); void DECLSPEC_NORETURN __FreeBuildAssertFail(const char *szFile, int iLine, const char *szExpr); #define FreeBuildDebugBreak() __FreeBuildDebugBreak() // At this point, EEPOLICY_HANDLE_FATAL_ERROR may or may not be defined. It will be defined // if we are building the VM folder, but outside VM, its not necessarily defined. // // Thus, if EEPOLICY_HANDLE_FATAL_ERROR is not defined, we will call into __FreeBuildAssertFail, // but if it is defined, we will use it. // // Failing here implies an error in the runtime - hence we use COR_E_EXECUTIONENGINE. #ifdef EEPOLICY_HANDLE_FATAL_ERROR #define _ASSERTE_ALL_BUILDS(file, expr) if (!(expr)) EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); #else // !EEPOLICY_HANDLE_FATAL_ERROR #define _ASSERTE_ALL_BUILDS(file, expr) if (!(expr)) __FreeBuildAssertFail(file, __LINE__, #expr); #endif // EEPOLICY_HANDLE_FATAL_ERROR #endif #define ASSERT_AND_CHECK(x) { \ BOOL bResult = x; \ if (!bResult) \ { \ _ASSERTE(x); \ return FALSE; \ } \ } #ifdef _DEBUG_IMPL // A macro to execute a statement only in _DEBUG_IMPL. 
#define DEBUG_IMPL_STMT(stmt) stmt #define _ASSERTE_IMPL(expr) _ASSERTE((expr)) #if defined(_M_IX86) #if defined(_MSC_VER) #define _DbgBreak() __asm { int 3 } #elif defined(__GNUC__) #define _DbgBreak() __asm__ ("int $3"); #else #error Unknown compiler #endif #else #define _DbgBreak() DebugBreak() #endif extern VOID DebBreak(); extern VOID DebBreakHr(HRESULT hr); #ifndef IfFailGoto #define IfFailGoto(EXPR, LABEL) \ do { hr = (EXPR); if(FAILED(hr)) { DebBreakHr(hr); goto LABEL; } } while (0) #endif #ifndef IfFailRet #define IfFailRet(EXPR) \ do { hr = (EXPR); if(FAILED(hr)) { DebBreakHr(hr); return (hr); } } while (0) #endif #ifndef IfFailWin32Ret #define IfFailWin32Ret(EXPR) \ do { hr = (EXPR); if(hr != ERROR_SUCCESS) { hr = HRESULT_FROM_WIN32(hr); DebBreakHr(hr); return hr;} } while (0) #endif #ifndef IfFailWin32Goto #define IfFailWin32Goto(EXPR, LABEL) \ do { hr = (EXPR); if(hr != ERROR_SUCCESS) { hr = HRESULT_FROM_WIN32(hr); DebBreakHr(hr); goto LABEL; } } while (0) #endif #ifndef IfFailGo #define IfFailGo(EXPR) IfFailGoto(EXPR, ErrExit) #endif #ifndef IfFailWin32Go #define IfFailWin32Go(EXPR) IfFailWin32Goto(EXPR, ErrExit) #endif #else // _DEBUG_IMPL #define _DbgBreak() {} #define DEBUG_IMPL_STMT(stmt) #define _ASSERTE_IMPL(expr) #define IfFailGoto(EXPR, LABEL) \ do { hr = (EXPR); if(FAILED(hr)) { goto LABEL; } } while (0) #define IfFailRet(EXPR) \ do { hr = (EXPR); if(FAILED(hr)) { return (hr); } } while (0) #define IfFailWin32Ret(EXPR) \ do { hr = (EXPR); if(hr != ERROR_SUCCESS) { hr = HRESULT_FROM_WIN32(hr); return hr;} } while (0) #define IfFailWin32Goto(EXPR, LABEL) \ do { hr = (EXPR); if(hr != ERROR_SUCCESS) { hr = HRESULT_FROM_WIN32(hr); goto LABEL; } } while (0) #define IfFailGo(EXPR) IfFailGoto(EXPR, ErrExit) #define IfFailWin32Go(EXPR) IfFailWin32Goto(EXPR, ErrExit) #endif // _DEBUG_IMPL #define IfNullGoto(EXPR, LABEL) \ do { if ((EXPR) == NULL) { OutOfMemory(); IfFailGoto(E_OUTOFMEMORY, LABEL); } } while (false) #ifndef IfNullRet #define IfNullRet(EXPR) \ do { if ((EXPR) == NULL) { OutOfMemory(); return E_OUTOFMEMORY; } } while (false) #endif //!IfNullRet #define IfNullGo(EXPR) IfNullGoto(EXPR, ErrExit) #ifdef __cplusplus } #endif // __cplusplus #undef assert #define assert _ASSERTE #undef _ASSERT #define _ASSERT _ASSERTE #if defined(_DEBUG) && defined(HOST_WINDOWS) // This function returns the EXE time stamp (effectively a random number) // Under retail it always returns 0. This is meant to be used in the // RandomOnExe macro unsigned DbgGetEXETimeStamp(); // returns true 'fractionOn' amount of the time using the EXE timestamp // as the random number seed. For example DbgRandomOnExe(.1) returns true 1/10 // of the time. We use the line number so that different uses of DbgRandomOnExe // will not be coorelated with each other (9973 is prime). Returns false on a retail build #define DbgRandomOnHashAndExe(hash, fractionOn) \ (((DbgGetEXETimeStamp() * __LINE__ * ((hash) ? (hash) : 1)) % 9973) < \ unsigned((fractionOn) * 9973)) #define DbgRandomOnExe(fractionOn) DbgRandomOnHashAndExe(0, fractionOn) #define DbgRandomOnStringAndExe(string, fractionOn) DbgRandomOnHashAndExe(HashStringA(string), fractionOn) #else #define DbgGetEXETimeStamp() 0 #define DbgRandomOnHashAndExe(hash, fractionOn) 0 #define DbgRandomOnExe(fractionOn) 0 #define DbgRandomOnStringAndExe(fractionOn) 0 #endif // _DEBUG && !FEATUREPAL #endif
-1
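debugmacros.h above builds error handling around HRESULT-returning expressions: IfFailRet returns early on failure, IfFailGo jumps to a shared ErrExit label, and debug builds additionally call a DebBreakHr hook. The sketch below shows only the control-flow idiom; HRESULT, S_OK, E_FAIL, and FAILED are local stand-ins so it builds outside the runtime tree, and the debugging hook is omitted.

// Standalone sketch of the IfFailRet / IfFailGo idiom. The macros expect a
// local named 'hr' and, for IfFailGo, an ErrExit label in the enclosing function.
#include <cstdint>
#include <cstdio>

typedef int32_t HRESULT;
#define S_OK       ((HRESULT)0)
#define E_FAIL     ((HRESULT)0x80004005)
#define FAILED(hr) ((hr) < 0)

#define IfFailRet(EXPR) do { hr = (EXPR); if (FAILED(hr)) return hr; } while (0)
#define IfFailGo(EXPR)  do { hr = (EXPR); if (FAILED(hr)) goto ErrExit; } while (0)

static HRESULT OpenThing(bool ok) { return ok ? S_OK : E_FAIL; }
static HRESULT UseThing()         { return S_OK; }

static HRESULT DoWork(bool ok)
{
    HRESULT hr = S_OK;
    IfFailGo(OpenThing(ok));   // on failure, jump to the common exit path
    IfFailGo(UseThing());
ErrExit:
    return hr;                 // single exit point for cleanup and return
}

int main()
{
    printf("DoWork(true)  -> 0x%x\n", (unsigned)DoWork(true));
    printf("DoWork(false) -> 0x%x\n", (unsigned)DoWork(false));
    return 0;
}

The single ErrExit label is the design point: cleanup code written once after the label runs on every failure path, which is why the real macros insist on the goto form rather than early returns scattered through the function.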
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/dlls/mscorpe/ceefilegenwriter.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Derived class from CCeeGen which handles writing out // the exe. All references to PEWriter pulled out of CCeeGen, // and moved here // // #include "stdafx.h" #include <string.h> #include <limits.h> #include "corerror.h" #include <posterror.h> #include <shlwapi.h> // The following block contains a template for the default entry point stubs of a COM+ // IL only program. One can emit these stubs (with some fix-ups) and make // the code supplied the entry point value for the image. The fix-ups will // in turn cause mscoree.dll to be loaded and the correct entry point to be // called. // // Note: Although these stubs contain x86 specific code, they are used // for all platforms //***************************************************************************** // This stub is designed for a x86 Windows application. It will call the // _CorExeMain function in mscoree.dll. This entry point will in turn load // and run the IL program. // // jump _CorExeMain(); // // The code jumps to the imported function _CorExeMain using the iat. // The address in the template is address of the iat entry which is // fixed up by the loader when the image is paged in. //***************************************************************************** const BYTE ExeMainX86Template[] = { // Jump through IAT to _CorExeMain 0xFF, 0x25, // jmp [iat:_CorDllMain entry] 0x00, 0x00, 0x00, 0x00, // address to replace }; #define ExeMainX86TemplateSize sizeof(ExeMainX86Template) #define CorExeMainX86IATOffset 2 //***************************************************************************** // This stub is designed for a x86 Windows application. It will call the // _CorDllMain function in mscoree.dll with with the base entry point for // the loaded DLL. This entry point will in turn load and run the IL program. // // jump _CorDllMain // // The code jumps to the imported function _CorExeMain using the iat. // The address in the template is address of the iat entry which is // fixed up by the loader when the image is paged in. //***************************************************************************** const BYTE DllMainX86Template[] = { // Jump through IAT to CorDllMain 0xFF, 0x25, // jmp [iat:_CorDllMain entry] 0x00, 0x00, 0x00, 0x00, // address to replace }; #define DllMainX86TemplateSize sizeof(DllMainX86Template) #define CorDllMainX86IATOffset 2 //***************************************************************************** // This stub is designed for a AMD64 Windows application. It will call the // _CorExeMain function in mscoree.dll. This entry point will in turn load // and run the IL program. // // mov rax, _CorExeMain(); // jmp [rax] // // The code jumps to the imported function _CorExeMain using the iat. // The address in the template is address of the iat entry which is // fixed up by the loader when the image is paged in. //***************************************************************************** const BYTE ExeMainAMD64Template[] = { // Jump through IAT to _CorExeMain 0x48, 0xA1, // rex.w rex.b mov rax,[following address] 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,//address of iat:_CorExeMain entry 0xFF, 0xE0 // jmp [rax] }; #define ExeMainAMD64TemplateSize sizeof(ExeMainAMD64Template) #define CorExeMainAMD64IATOffset 2 //***************************************************************************** // This stub is designed for a AMD64 Windows application. 
It will call the // _CorDllMain function in mscoree.dll with with the base entry point for // the loaded DLL. This entry point will in turn load and run the IL program. // // mov rax, _CorDllMain(); // jmp [rax] // // The code jumps to the imported function _CorDllMain using the iat. // The address in the template is address of the iat entry which is // fixed up by the loader when the image is paged in. //***************************************************************************** const BYTE DllMainAMD64Template[] = { // Jump through IAT to CorDllMain 0x48, 0xA1, // rex.w rex.b mov rax,[following address] 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,//address of iat:_CorDllMain entry 0xFF, 0xE0 // jmp [rax] }; #define DllMainAMD64TemplateSize sizeof(DllMainAMD64Template) #define CorDllMainAMD64IATOffset 2 //***************************************************************************** // This stub is designed for an ia64 Windows application. It will call the // _CorExeMain function in mscoree.dll. This entry point will in turn load // and run the IL program. // // jump _CorExeMain(); // // The code jumps to the imported function _CorExeMain using the iat. // We set the value of gp to point at the iat table entry for _CorExeMain //***************************************************************************** const BYTE ExeMainIA64Template[] = { // ld8 r9 = [gp] ;; // ld8 r10 = [r9],8 // nop.i ;; // ld8 gp = [r9] // mov b6 = r10 // br.cond.sptk.few b6 // 0x0B, 0x48, 0x00, 0x02, 0x18, 0x10, 0xA0, 0x40, 0x24, 0x30, 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x10, 0x08, 0x00, 0x12, 0x18, 0x10, 0x60, 0x50, 0x04, 0x80, 0x03, 0x00, 0x60, 0x00, 0x80, 0x00 }; #define ExeMainIA64TemplateSize sizeof(ExeMainIA64Template) //***************************************************************************** // This stub is designed for an ia64 Windows application. It will call the // _CorDllMain function in mscoree.dll with with the base entry point for // the loaded DLL. This entry point will in turn load and run the IL program. // // jump _CorDllMain // // The code jumps to the imported function _CorExeMain using the iat. // We set the value of gp to point at the iat table entry for _CorExeMain //***************************************************************************** const BYTE DllMainIA64Template[] = { // ld8 r9 = [gp] ;; // ld8 r10 = [r9],8 // nop.i ;; // ld8 gp = [r9] // mov b6 = r10 // br.cond.sptk.few b6 // 0x0B, 0x48, 0x00, 0x02, 0x18, 0x10, 0xA0, 0x40, 0x24, 0x30, 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x10, 0x08, 0x00, 0x12, 0x18, 0x10, 0x60, 0x50, 0x04, 0x80, 0x03, 0x00, 0x60, 0x00, 0x80, 0x00 }; #define DllMainIA64TemplateSize sizeof(DllMainIA64Template) // Get the Symbol entry given the head and a 0-based index inline IMAGE_SYMBOL* GetSymbolEntry(IMAGE_SYMBOL* pHead, SIZE_T idx) { return (IMAGE_SYMBOL*) (((BYTE*) pHead) + IMAGE_SIZEOF_SYMBOL * idx); } //***************************************************************************** // To get a new instance, call CreateNewInstance() or CreateNewInstanceEx() instead of new //***************************************************************************** HRESULT CeeFileGenWriter::CreateNewInstance(CCeeGen *pCeeFileGenFrom, CeeFileGenWriter* & pGenWriter, DWORD createFlags) { return CreateNewInstanceEx(pCeeFileGenFrom, pGenWriter, createFlags); } // // Seed file is used as the base file. 
The new file data will be "appended" to the seed file // HRESULT CeeFileGenWriter::CreateNewInstanceEx(CCeeGen *pCeeFileGenFrom, CeeFileGenWriter* & pGenWriter, DWORD createFlags, LPCWSTR seedFileName) { HRESULT hr = S_OK; ULONG preallocatedOffset = 0; NewHolder<PEWriter> pPEWriter(NULL); NewHolder<CeeFileGenWriter> pPrivateGenWriter; CeeSection *corHeaderSection = NULL; pPrivateGenWriter = new (nothrow) CeeFileGenWriter; if (pPrivateGenWriter == NULL) IfFailGo(E_OUTOFMEMORY); pPEWriter = new (nothrow) PEWriter; if (pPEWriter == NULL) IfFailGo(E_OUTOFMEMORY); //workaround //What's really the correct thing to be doing here? //HRESULT hr = pPEWriter->Init(pCeeFileGenFrom ? pCeeFileGenFrom->getPESectionMan() : NULL); hr = pPEWriter->Init(NULL, createFlags, seedFileName); IfFailGo(hr); //Create the general PEWriter. pPrivateGenWriter->m_peSectionMan = pPEWriter; hr = pPrivateGenWriter->Init(); // base class member to finish init IfFailGo(hr); if (!seedFileName) // Use base file's preferred base (if present) { if (pPEWriter->isPE32()) { pPrivateGenWriter->setImageBase((DWORD) CEE_IMAGE_BASE_32); // use same default as linker } else { pPrivateGenWriter->setImageBase64((ULONGLONG) CEE_IMAGE_BASE_64); // use same default as linker } } pPrivateGenWriter->setSubsystem(IMAGE_SUBSYSTEM_WINDOWS_CUI, CEE_IMAGE_SUBSYSTEM_MAJOR_VERSION, CEE_IMAGE_SUBSYSTEM_MINOR_VERSION); if (pPEWriter->createCorMainStub()) { hr = pPrivateGenWriter->allocateIAT(); // so the IAT goes out first IfFailGo(hr); } hr = pPrivateGenWriter->allocateCorHeader(); // get COR header near front IfFailGo(hr); //If we were passed a CCeeGen at the beginning, copy it's data now. if (pCeeFileGenFrom) { pCeeFileGenFrom->cloneInstance((CCeeGen*)pPrivateGenWriter); } hr = pPrivateGenWriter->getSectionCreate(".text0", sdExecute, &corHeaderSection); IfFailGo(hr); preallocatedOffset = corHeaderSection->dataLen(); // set il RVA to be after the preallocated sections pPEWriter->setIlRva(preallocatedOffset); pPEWriter.SuppressRelease(); pPrivateGenWriter.SuppressRelease(); pGenWriter = pPrivateGenWriter; ErrExit: return hr; } // HRESULT CeeFileGenWriter::CreateNewInstance() CeeFileGenWriter::CeeFileGenWriter() // ctor is protected { m_outputFileName = NULL; m_resourceFileName = NULL; m_dllSwitch = false; m_entryPoint = 0; m_comImageFlags = COMIMAGE_FLAGS_ILONLY; // ceegen PEs don't have native code m_iatOffset = 0; m_dllCount = 0; m_dwManifestSize = 0; m_dwManifestRVA = NULL; m_dwStrongNameSize = 0; m_dwStrongNameRVA = NULL; m_dwVTableSize = 0; m_dwVTableRVA = NULL; m_iDataDlls = NULL; m_linked = false; m_fixed = false; } // CeeFileGenWriter::CeeFileGenWriter() //***************************************************************************** // Cleanup //***************************************************************************** HRESULT CeeFileGenWriter::Cleanup() // virtual { ((PEWriter *)m_peSectionMan)->Cleanup(); // call derived cleanup delete m_peSectionMan; m_peSectionMan = NULL; // so base class won't delete delete[] m_outputFileName; delete[] m_resourceFileName; if (m_iDataDlls) { for (int i=0; i < m_dllCount; i++) { if (m_iDataDlls[i].m_methodName) delete[] m_iDataDlls[i].m_methodName; } delete[] m_iDataDlls; } return CCeeGen::Cleanup(); } // HRESULT CeeFileGenWriter::Cleanup() HRESULT CeeFileGenWriter::link() { HRESULT hr = checkForErrors(); if (! 
SUCCEEDED(hr)) return hr; // Don't set this if SetManifestEntry was not called - zapper sets the // resource directory explicitly if (m_dwManifestSize != 0) { m_corHeader->Resources.VirtualAddress = VAL32(m_dwManifestRVA); m_corHeader->Resources.Size = VAL32(m_dwManifestSize); } if (m_dwStrongNameSize != 0) { m_corHeader->StrongNameSignature.VirtualAddress = VAL32(m_dwStrongNameRVA); m_corHeader->StrongNameSignature.Size = VAL32(m_dwStrongNameSize); } if (m_dwVTableSize != 0) { m_corHeader->VTableFixups.VirtualAddress = VAL32(m_dwVTableRVA); m_corHeader->VTableFixups.Size = VAL32(m_dwVTableSize); } unsigned characteristicsMask = IMAGE_FILE_EXECUTABLE_IMAGE; if (getPEWriter().isPE32()) characteristicsMask |= IMAGE_FILE_32BIT_MACHINE; if (!getPEWriter().isPE32()) characteristicsMask |= IMAGE_FILE_LARGE_ADDRESS_AWARE; getPEWriter().setCharacteristics(characteristicsMask); m_corHeader->cb = VAL32(sizeof(IMAGE_COR20_HEADER)); m_corHeader->MajorRuntimeVersion = VAL16(COR_VERSION_MAJOR); m_corHeader->MinorRuntimeVersion = VAL16(COR_VERSION_MINOR); if (m_dllSwitch) getPEWriter().setCharacteristics(IMAGE_FILE_DLL); m_corHeader->Flags = VAL32(m_comImageFlags); IMAGE_COR20_HEADER_FIELD(*m_corHeader, EntryPointToken) = VAL32(m_entryPoint); _ASSERTE(TypeFromToken(m_entryPoint) == mdtMethodDef || m_entryPoint == mdTokenNil || TypeFromToken(m_entryPoint) == mdtFile); setDirectoryEntry(getCorHeaderSection(), IMAGE_DIRECTORY_ENTRY_COMHEADER, sizeof(IMAGE_COR20_HEADER), m_corHeaderOffset); if ((m_comImageFlags & COMIMAGE_FLAGS_IL_LIBRARY) == 0 && !m_linked) { hr = emitExeMain(); if (FAILED(hr)) return hr; #ifndef TARGET_UNIX hr = emitResourceSection(); if (FAILED(hr)) return hr; #endif } m_linked = true; IfFailRet(getPEWriter().link()); return S_OK; } // HRESULT CeeFileGenWriter::link() HRESULT CeeFileGenWriter::fixup() { HRESULT hr; m_fixed = true; if (!m_linked) IfFailRet(link()); CeeGenTokenMapper *pMapper = getTokenMapper(); // Apply token remaps if there are any. if (! m_fTokenMapSupported && pMapper != NULL) { IMetaDataImport *pImport; hr = pMapper->GetMetaData(&pImport); _ASSERTE(SUCCEEDED(hr)); hr = MapTokens(pMapper, pImport); pImport->Release(); } // remap the entry point if entry point token has been moved if (pMapper != NULL) { mdToken tk = m_entryPoint; pMapper->HasTokenMoved(tk, tk); IMAGE_COR20_HEADER_FIELD(*m_corHeader, EntryPointToken) = VAL32(tk); } IfFailRet(getPEWriter().fixup(pMapper)); return S_OK; } // HRESULT CeeFileGenWriter::fixup() HRESULT CeeFileGenWriter::generateImage(void **ppImage) { HRESULT hr = S_OK; LPCWSTR outputFileName = NULL; #ifndef TARGET_UNIX HANDLE hThreadToken = NULL; // Impersonation is only supported on Win2k and above. if (!OpenThreadToken(GetCurrentThread(), TOKEN_READ | TOKEN_IMPERSONATE, TRUE, &hThreadToken)) { if (GetLastError() != ERROR_NO_TOKEN) { _ASSERTE(!"Failed to get thread token!"); return HRESULT_FROM_GetLastError(); } } if (hThreadToken != NULL) { if (!RevertToSelf()) { _ASSERTE(!"Failed to revert impersonation!"); CloseHandle(hThreadToken); return HRESULT_FROM_GetLastError(); } } #endif // !TARGET_UNIX if (!m_fixed) IfFailGo(fixup()); outputFileName = m_outputFileName; if (! 
outputFileName && ppImage == NULL) { if (m_comImageFlags & COMIMAGE_FLAGS_IL_LIBRARY) outputFileName = W("output.ill"); else if (m_dllSwitch) outputFileName = W("output.dll"); else outputFileName = W("output.exe"); } // output file name and ppImage are mutually exclusive _ASSERTE((NULL == outputFileName && ppImage != NULL) || (outputFileName != NULL && NULL == ppImage)); if (outputFileName != NULL) IfFailGo(getPEWriter().write(outputFileName)); else IfFailGo(getPEWriter().write(ppImage)); ErrExit: #ifndef TARGET_UNIX if (hThreadToken != NULL) { BOOL success = SetThreadToken(NULL, hThreadToken); CloseHandle(hThreadToken); if (!success) { _ASSERTE(!"Failed to reimpersonate!"); hr = HRESULT_FROM_GetLastError(); } } #endif // !TARGET_UNIX return hr; } // HRESULT CeeFileGenWriter::generateImage() HRESULT CeeFileGenWriter::setOutputFileName(_In_ LPWSTR fileName) { if (m_outputFileName) delete[] m_outputFileName; size_t len = wcslen(fileName) + 1; m_outputFileName = (LPWSTR)new (nothrow) WCHAR[len]; TESTANDRETURN(m_outputFileName!=NULL, E_OUTOFMEMORY); wcscpy_s(m_outputFileName, len, fileName); return S_OK; } // HRESULT CeeFileGenWriter::setOutputFileName() HRESULT CeeFileGenWriter::setResourceFileName(_In_ LPWSTR fileName) { if (m_resourceFileName) delete[] m_resourceFileName; size_t len = wcslen(fileName) + 1; m_resourceFileName = (LPWSTR)new (nothrow) WCHAR[len]; TESTANDRETURN(m_resourceFileName!=NULL, E_OUTOFMEMORY); wcscpy_s(m_resourceFileName, len, fileName); return S_OK; } // HRESULT CeeFileGenWriter::setResourceFileName() HRESULT CeeFileGenWriter::setImageBase(size_t imageBase) { _ASSERTE(getPEWriter().isPE32()); getPEWriter().setImageBase32((DWORD)imageBase); return S_OK; } // HRESULT CeeFileGenWriter::setImageBase() HRESULT CeeFileGenWriter::setImageBase64(ULONGLONG imageBase) { _ASSERTE(!getPEWriter().isPE32()); getPEWriter().setImageBase64(imageBase); return S_OK; } // HRESULT CeeFileGenWriter::setImageBase64() HRESULT CeeFileGenWriter::setFileAlignment(ULONG fileAlignment) { getPEWriter().setFileAlignment(fileAlignment); return S_OK; } // HRESULT CeeFileGenWriter::setFileAlignment() HRESULT CeeFileGenWriter::setSubsystem(DWORD subsystem, DWORD major, DWORD minor) { getPEWriter().setSubsystem(subsystem, major, minor); return S_OK; } // HRESULT CeeFileGenWriter::setSubsystem() HRESULT CeeFileGenWriter::checkForErrors() { if (TypeFromToken(m_entryPoint) == mdtMethodDef) { if (m_dllSwitch) { //current spec would need to check the binary sig of the entry point method } return S_OK; } return S_OK; } // HRESULT CeeFileGenWriter::checkForErrors() HRESULT CeeFileGenWriter::getMethodRVA(ULONG codeOffset, ULONG *codeRVA) { _ASSERTE(codeRVA); *codeRVA = getPEWriter().getIlRva() + codeOffset; return S_OK; } // HRESULT CeeFileGenWriter::getMethodRVA() HRESULT CeeFileGenWriter::setDirectoryEntry(CeeSection &section, ULONG entry, ULONG size, ULONG offset) { return getPEWriter().setDirectoryEntry((PEWriterSection*)(&section.getImpl()), entry, size, offset); } // HRESULT CeeFileGenWriter::setDirectoryEntry() HRESULT CeeFileGenWriter::getFileTimeStamp(DWORD *pTimeStamp) { return getPEWriter().getFileTimeStamp(pTimeStamp); } // HRESULT CeeFileGenWriter::getFileTimeStamp() HRESULT CeeFileGenWriter::setAddrReloc(UCHAR *instrAddr, DWORD value) { *(DWORD *)instrAddr = VAL32(value); return S_OK; } // HRESULT CeeFileGenWriter::setAddrReloc() HRESULT CeeFileGenWriter::addAddrReloc(CeeSection &thisSection, UCHAR *instrAddr, DWORD offset, CeeSection *targetSection) { if (!targetSection) { 
thisSection.addBaseReloc(offset, srRelocHighLow); } else { thisSection.addSectReloc(offset, *targetSection, srRelocHighLow); } return S_OK; } // HRESULT CeeFileGenWriter::addAddrReloc() // create CorExeMain and import directory into .text and the .iat into .data // // The structure of the import directory information is as follows, but it is not contiguous in // section. All the r/o data goes into the .text section and the iat array (which the loader // updates with the imported addresses) goes into the .data section because WINCE needs it to be writable. // // struct IData { // // one for each DLL, terminating in NULL // IMAGE_IMPORT_DESCRIPTOR iid[]; // // import lookup table: a set of entries for the methods of each DLL, // // terminating each set with NULL // IMAGE_THUNK_DATA32/64 ilt[]; // // hint/name table: an set of entries for each method of each DLL wiht // // no terminating entry // struct { // WORD Hint; // // null terminated string // BYTE Name[]; // } ibn; // Hint/name table // // import address table: a set of entries for the methods of each DLL, // // terminating each set with NULL // IMAGE_THUNK_DATA32/64 iat[]; // // one for each DLL, null terminated strings // BYTE DllName[]; // }; // // IAT must be first in its section, so have code here to allocate it up front // prior to knowing other info such as if dll or not. This won't work if have > 1 // function imported, but we'll burn that bridge when we get to it. HRESULT CeeFileGenWriter::allocateIAT() { m_dllCount = 1; m_iDataDlls = new (nothrow) IDataDllInfo[m_dllCount]; if (m_iDataDlls == NULL) { return E_OUTOFMEMORY; } memset(m_iDataDlls, '\0', m_dllCount * sizeof(IDataDllInfo)); m_iDataDlls[0].m_name = "mscoree.dll"; m_iDataDlls[0].m_numMethods = 1; m_iDataDlls[0].m_methodName = new (nothrow) const char*[m_iDataDlls[0].m_numMethods]; if (! m_iDataDlls[0].m_methodName) { return E_OUTOFMEMORY; } m_iDataDlls[0].m_methodName[0] = NULL; int iDataSizeIAT = 0; for (int i=0; i < m_dllCount; i++) { m_iDataDlls[i].m_iatOffset = iDataSizeIAT; iDataSizeIAT += (m_iDataDlls[i].m_numMethods + 1) * (getPEWriter().isPE32() ? sizeof(IMAGE_THUNK_DATA32) : sizeof(IMAGE_THUNK_DATA64)); } HRESULT hr = getSectionCreate(".text0", sdExecute, &m_iDataSectionIAT); TESTANDRETURNHR(hr); m_iDataOffsetIAT = m_iDataSectionIAT->dataLen(); _ASSERTE(m_iDataOffsetIAT == 0); m_iDataIAT = m_iDataSectionIAT->getBlock(iDataSizeIAT); if (! m_iDataIAT) { return E_OUTOFMEMORY; } memset(m_iDataIAT, '\0', iDataSizeIAT); // Don't set the IAT directory entry yet, since we may not actually end up doing // an emitExeMain. return S_OK; } // HRESULT CeeFileGenWriter::allocateIAT() HRESULT CeeFileGenWriter::emitExeMain() { if (m_dllCount == 0) return S_OK; // Note: code later on in this method assumes that mscoree.dll is at // index m_iDataDlls[0], with CorDllMain or CorExeMain at method[0] _ASSERTE(getPEWriter().createCorMainStub()); if (m_dllSwitch) { m_iDataDlls[0].m_methodName[0] = "_CorDllMain"; } else { m_iDataDlls[0].m_methodName[0] = "_CorExeMain"; } // IMAGE_IMPORT_DESCRIPTOR on PE/PE+ must be 4-byte or 8-byte aligned int align = (getPEWriter().isPE32()) ? 
4 : 8; int curOffset = getTextSection().dataLen(); int diff = ((curOffset + align -1) & ~(align-1)) - curOffset; if (diff) { char* pDiff = getTextSection().getBlock(diff); if (NULL==pDiff) return E_OUTOFMEMORY; memset(pDiff,0,diff); } int iDataSizeRO = (m_dllCount + 1) * sizeof(IMAGE_IMPORT_DESCRIPTOR); CeeSection &iDataSectionRO = getTextSection(); int iDataOffsetRO = iDataSectionRO.dataLen(); int iDataSizeIAT = 0; int i; for (i=0; i < m_dllCount; i++) { m_iDataDlls[i].m_iltOffset = iDataSizeRO + iDataSizeIAT; iDataSizeIAT += (m_iDataDlls[i].m_numMethods + 1) * (getPEWriter().isPE32() ? sizeof(IMAGE_THUNK_DATA32) : sizeof(IMAGE_THUNK_DATA64)); } iDataSizeRO += iDataSizeIAT; for (i=0; i < m_dllCount; i++) { int delta = (iDataSizeRO + iDataOffsetRO) % 16; // make sure is on a 16-byte offset if (delta != 0) iDataSizeRO += (16 - delta); _ASSERTE((iDataSizeRO + iDataOffsetRO) % 16 == 0); m_iDataDlls[i].m_ibnOffset = iDataSizeRO; for (int j=0; j < m_iDataDlls[i].m_numMethods; j++) { int nameLen = (int)(strlen(m_iDataDlls[i].m_methodName[j]) + 1); iDataSizeRO += sizeof(WORD) + nameLen + nameLen%2; } } for (i=0; i < m_dllCount; i++) { m_iDataDlls[i].m_nameOffset = iDataSizeRO; iDataSizeRO += (int)(strlen(m_iDataDlls[i].m_name) + 2); } char *iDataRO = iDataSectionRO.getBlock(iDataSizeRO); if (!iDataRO) return E_OUTOFMEMORY; memset(iDataRO, '\0', iDataSizeRO); setDirectoryEntry(iDataSectionRO, IMAGE_DIRECTORY_ENTRY_IMPORT, iDataSizeRO, iDataOffsetRO); IMAGE_IMPORT_DESCRIPTOR *iid = (IMAGE_IMPORT_DESCRIPTOR *)iDataRO; for (i=0; i < m_dllCount; i++) { // fill in the import descriptors for each DLL IMAGE_IMPORT_DESC_FIELD(iid[i], OriginalFirstThunk) = VAL32((ULONG)(m_iDataDlls[i].m_iltOffset + iDataOffsetRO)); iid[i].Name = VAL32(m_iDataDlls[i].m_nameOffset + iDataOffsetRO); iid[i].FirstThunk = VAL32((ULONG)(m_iDataDlls[i].m_iatOffset + m_iDataOffsetIAT)); iDataSectionRO.addSectReloc( (unsigned)(iDataOffsetRO + (char *)(&IMAGE_IMPORT_DESC_FIELD(iid[i], OriginalFirstThunk)) - iDataRO), iDataSectionRO, srRelocAbsolute); iDataSectionRO.addSectReloc( (unsigned)(iDataOffsetRO + (char *)(&iid[i].Name) - iDataRO), iDataSectionRO, srRelocAbsolute); iDataSectionRO.addSectReloc( (unsigned)(iDataOffsetRO + (char *)(&iid[i].FirstThunk) - iDataRO), *m_iDataSectionIAT, srRelocAbsolute); if (getPEWriter().isPE32()) { // now fill in the import lookup table for each DLL IMAGE_THUNK_DATA32 *ilt = (IMAGE_THUNK_DATA32*) (iDataRO + m_iDataDlls[i].m_iltOffset); IMAGE_THUNK_DATA32 *iat = (IMAGE_THUNK_DATA32*) (m_iDataIAT + m_iDataDlls[i].m_iatOffset); int ibnOffset = m_iDataDlls[i].m_ibnOffset; for (int j=0; j < m_iDataDlls[i].m_numMethods; j++) { ilt[j].u1.AddressOfData = VAL32((ULONG)(ibnOffset + iDataOffsetRO)); iat[j].u1.AddressOfData = VAL32((ULONG)(ibnOffset + iDataOffsetRO)); iDataSectionRO.addSectReloc( (unsigned)(iDataOffsetRO + (char *)(&ilt[j].u1.AddressOfData) - iDataRO), iDataSectionRO, srRelocAbsolute); m_iDataSectionIAT->addSectReloc( (unsigned)(m_iDataOffsetIAT + (char *)(&iat[j].u1.AddressOfData) - m_iDataIAT), iDataSectionRO, srRelocAbsolute); int nameLen = (int)(strlen(m_iDataDlls[i].m_methodName[j]) + 1); memcpy(iDataRO + ibnOffset + offsetof(IMAGE_IMPORT_BY_NAME, Name), m_iDataDlls[i].m_methodName[j], nameLen); ibnOffset += sizeof(WORD) + nameLen + nameLen%2; } } else { // now fill in the import lookup table for each DLL IMAGE_THUNK_DATA64 *ilt = (IMAGE_THUNK_DATA64*) (iDataRO + m_iDataDlls[i].m_iltOffset); IMAGE_THUNK_DATA64 *iat = (IMAGE_THUNK_DATA64*) (m_iDataIAT + m_iDataDlls[i].m_iatOffset); int 
ibnOffset = m_iDataDlls[i].m_ibnOffset; for (int j=0; j < m_iDataDlls[i].m_numMethods; j++) { ilt[j].u1.AddressOfData = VAL64((ULONG)(ibnOffset + iDataOffsetRO)); iat[j].u1.AddressOfData = VAL64((ULONG)(ibnOffset + iDataOffsetRO)); iDataSectionRO.addSectReloc( (unsigned)(iDataOffsetRO + (char *)(&ilt[j].u1.AddressOfData) - iDataRO), iDataSectionRO, srRelocAbsolute); m_iDataSectionIAT->addSectReloc( (unsigned)(m_iDataOffsetIAT + (char *)(&iat[j].u1.AddressOfData) - m_iDataIAT), iDataSectionRO, srRelocAbsolute); int nameLen = (int)(strlen(m_iDataDlls[i].m_methodName[j]) + 1); memcpy(iDataRO + ibnOffset + offsetof(IMAGE_IMPORT_BY_NAME, Name), m_iDataDlls[i].m_methodName[j], nameLen); ibnOffset += sizeof(WORD) + nameLen + nameLen%2; } } // now fill in the import lookup table for each DLL strcpy_s(iDataRO + m_iDataDlls[i].m_nameOffset, iDataSizeRO - m_iDataDlls[i].m_nameOffset, m_iDataDlls[i].m_name); } // end of for loop i < m_dllCount if (getPEWriter().isI386()) { // Put the entry point code into the PE file unsigned entryPointOffset = getTextSection().dataLen(); int iatOffset = (int) (entryPointOffset + (m_dllSwitch ? CorDllMainX86IATOffset : CorExeMainX86IATOffset)); align = 4; // x86 fixups must be 4-byte aligned // The IAT offset must be aligned because fixup is applied to it. diff = ((iatOffset + align -1) & ~(align-1)) - iatOffset; if (diff) { char* pDiff = getTextSection().getBlock(diff); if(NULL==pDiff) return E_OUTOFMEMORY; memset(pDiff,0,diff); entryPointOffset += diff; } _ASSERTE((getTextSection().dataLen() + (m_dllSwitch ? CorDllMainX86IATOffset : CorExeMainX86IATOffset)) % align == 0); getPEWriter().setEntryPointTextOffset(entryPointOffset); if (m_dllSwitch) { UCHAR *dllMainBuf = (UCHAR*)getTextSection().getBlock(sizeof(DllMainX86Template)); if(dllMainBuf==NULL) return E_OUTOFMEMORY; memcpy(dllMainBuf, DllMainX86Template, sizeof(DllMainX86Template)); //mscoree.dll setAddrReloc(dllMainBuf+CorDllMainX86IATOffset, m_iDataDlls[0].m_iatOffset + m_iDataOffsetIAT); addAddrReloc(getTextSection(), dllMainBuf, entryPointOffset+CorDllMainX86IATOffset, m_iDataSectionIAT); } else { UCHAR *exeMainBuf = (UCHAR*)getTextSection().getBlock(sizeof(ExeMainX86Template)); if(exeMainBuf==NULL) return E_OUTOFMEMORY; memcpy(exeMainBuf, ExeMainX86Template, sizeof(ExeMainX86Template)); //mscoree.dll setAddrReloc(exeMainBuf+CorExeMainX86IATOffset, m_iDataDlls[0].m_iatOffset + m_iDataOffsetIAT); addAddrReloc(getTextSection(), exeMainBuf, entryPointOffset+CorExeMainX86IATOffset, m_iDataSectionIAT); } } else if (getPEWriter().isAMD64()) { // Put the entry point code into the PE file unsigned entryPointOffset = getTextSection().dataLen(); int iatOffset = (int) (entryPointOffset + (m_dllSwitch ? CorDllMainAMD64IATOffset : CorExeMainAMD64IATOffset)); align = 16; // AMD64 fixups must be 8-byte aligned // The IAT offset must be aligned because fixup is applied to it. diff = ((iatOffset + align -1) & ~(align-1)) - iatOffset; if (diff) { char* pDiff = getTextSection().getBlock(diff); if(NULL==pDiff) return E_OUTOFMEMORY; memset(pDiff,0,diff); entryPointOffset += diff; } _ASSERTE((getTextSection().dataLen() + (m_dllSwitch ? 
CorDllMainAMD64IATOffset : CorExeMainAMD64IATOffset)) % align == 0); getPEWriter().setEntryPointTextOffset(entryPointOffset); if (m_dllSwitch) { UCHAR *dllMainBuf = (UCHAR*)getTextSection().getBlock(sizeof(DllMainAMD64Template)); if(dllMainBuf==NULL) return E_OUTOFMEMORY; memcpy(dllMainBuf, DllMainAMD64Template, sizeof(DllMainAMD64Template)); //mscoree.dll setAddrReloc(dllMainBuf+CorDllMainAMD64IATOffset, m_iDataDlls[0].m_iatOffset + m_iDataOffsetIAT); addAddrReloc(getTextSection(), dllMainBuf, entryPointOffset+CorDllMainAMD64IATOffset, m_iDataSectionIAT); } else { UCHAR *exeMainBuf = (UCHAR*)getTextSection().getBlock(sizeof(ExeMainAMD64Template)); if(exeMainBuf==NULL) return E_OUTOFMEMORY; memcpy(exeMainBuf, ExeMainAMD64Template, sizeof(ExeMainAMD64Template)); //mscoree.dll setAddrReloc(exeMainBuf+CorExeMainAMD64IATOffset, m_iDataDlls[0].m_iatOffset + m_iDataOffsetIAT); addAddrReloc(getTextSection(), exeMainBuf, entryPointOffset+CorExeMainAMD64IATOffset, m_iDataSectionIAT); } } else if (getPEWriter().isIA64()) { // Must have a PE+ PE64 file //_ASSERTE(!getPEWriter().isPE32()); // Put the entry point code into the PE+ file curOffset = getTextSection().dataLen(); align = 16; // instructions on ia64 must be 16-byte aligned // The entry point address be aligned diff = ((curOffset + align -1) & ~(align-1)) - curOffset; if (diff) { char* pDiff = getTextSection().getBlock(diff); if(NULL==pDiff) return E_OUTOFMEMORY; memset(pDiff,0,diff); } unsigned entryPointOffset = getTextSection().dataLen(); if (m_dllSwitch) { UCHAR *dllMainBuf = (UCHAR*)getTextSection().getBlock(sizeof(DllMainIA64Template)); if (dllMainBuf==NULL) return E_OUTOFMEMORY; memcpy(dllMainBuf, DllMainIA64Template, sizeof(DllMainIA64Template)); } else { UCHAR *exeMainBuf = (UCHAR*)getTextSection().getBlock(sizeof(ExeMainIA64Template)); if (exeMainBuf==NULL) return E_OUTOFMEMORY; memcpy(exeMainBuf, ExeMainIA64Template, sizeof(ExeMainIA64Template)); } // Put the entry point function pointer into the PE file unsigned entryPlabelOffset = getTextSection().dataLen(); getPEWriter().setEntryPointTextOffset(entryPlabelOffset); UCHAR * entryPtr = (UCHAR*)getTextSection().getBlock(sizeof(ULONGLONG)); UCHAR * gpPtr = (UCHAR*)getTextSection().getBlock(sizeof(ULONGLONG)); memset(entryPtr,0,sizeof(ULONGLONG)); memset(gpPtr,0,sizeof(ULONGLONG)); setAddrReloc(entryPtr, entryPointOffset); addAddrReloc(getTextSection(), entryPtr, entryPlabelOffset, &getTextSection()); setAddrReloc(gpPtr, m_iDataDlls[0].m_iatOffset + m_iDataOffsetIAT); addAddrReloc(getTextSection(), gpPtr, entryPlabelOffset+8, m_iDataSectionIAT); } else { _ASSERTE(!"Unknown target machine"); } // Now set our IAT entry since we're using the IAT setDirectoryEntry(*m_iDataSectionIAT, IMAGE_DIRECTORY_ENTRY_IAT, iDataSizeIAT, m_iDataOffsetIAT); return S_OK; } // HRESULT CeeFileGenWriter::emitExeMain() #ifndef TARGET_UNIX // This function reads a resource file and emits it into the generated PE file. // 1. We can only link resources in obj format. Must convert from .res to .obj // with CvtRes.exe. See https://github.com/dotnet/runtime/issues/11412. // 2. 
Must touch up all COFF relocs from .rsrc$01 (resource header) to .rsrc$02 // (resource raw data) HRESULT CeeFileGenWriter::emitResourceSection() { if (m_resourceFileName == NULL) return S_OK; const WCHAR* szResFileName = m_resourceFileName; // read the resource file and spit it out in the .rsrc section HANDLE hFile = INVALID_HANDLE_VALUE; HANDLE hMap = NULL; IMAGE_FILE_HEADER *hMod = NULL; HRESULT hr = S_OK; struct Param { HANDLE hFile; HANDLE hMap; IMAGE_FILE_HEADER *hMod; const WCHAR* szResFileName; CeeFileGenWriter *genWriter; HRESULT hr; } param; param.hFile = hFile; param.hMap = hMap; param.hMod = hMod; param.szResFileName = szResFileName; param.genWriter = this; param.hr = S_OK; PAL_TRY(Param *, pParam, &param) { SIZE_T cbFileSize; const BYTE *pbStartOfMappedMem; IMAGE_SECTION_HEADER *rsrc[2] = { NULL, NULL }; S_SIZE_T cbTotalSizeOfRawData; char *data = NULL; SIZE_T cReloc = 0; IMAGE_RELOCATION *pReloc = NULL; SIZE_T cSymbol = 0; IMAGE_SYMBOL *pSymbolTable = NULL; // create a mapped view of the .res file pParam->hFile = WszCreateFile(pParam->szResFileName, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); if (pParam->hFile == INVALID_HANDLE_VALUE) { //dbprintf("Resource file %S not found\n", szResFileName); pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } // Grab the file size for verification checks. { DWORD dwFileSizeHigh; DWORD dwFileSize = SafeGetFileSize(pParam->hFile, &dwFileSizeHigh); if (dwFileSize == (DWORD)(-1)) { pParam->hr = HRESULT_FROM_GetLastError(); goto lDone; } // Since we intend to memory map this file, the size of the file can not need 64 bits to represent! if (dwFileSizeHigh != 0) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } cbFileSize = static_cast<SIZE_T>(dwFileSize); } pParam->hMap = WszCreateFileMapping(pParam->hFile, 0, PAGE_READONLY, 0, 0, NULL); if (pParam->hMap == NULL) { //dbprintf("Invalid .res file: %S\n", szResFileName); pParam->hr = HRESULT_FROM_GetLastError(); goto lDone; } pbStartOfMappedMem = reinterpret_cast<const BYTE *>(MapViewOfFile(pParam->hMap, FILE_MAP_READ, 0, 0, 0)); // test failure conditions if (pbStartOfMappedMem == NULL) { //dbprintf("Invalid .res file: %S:Can't get header\n", szResFileName); pParam->hr = HRESULT_FROM_GetLastError(); goto lDone; } // Check that the file contains an IMAGE_FILE_HEADER structure. if (IMAGE_SIZEOF_FILE_HEADER > cbFileSize) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } pParam->hMod = (IMAGE_FILE_HEADER*)pbStartOfMappedMem; if (VAL16(pParam->hMod->SizeOfOptionalHeader) != 0) { //dbprintf("Invalid .res file: %S:Illegal optional header\n", szResFileName); pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); // GetLastError() = 0 since API worked. goto lDone; } // Scan all section headers and grab .rsrc$01 and .rsrc$02 { // First section is directly after header SIZE_T cSections = static_cast<SIZE_T>(VAL16(pParam->hMod->NumberOfSections)); SIZE_T cbStartOfSections = IMAGE_SIZEOF_FILE_HEADER; S_SIZE_T cbEndOfSections(S_SIZE_T(cbStartOfSections) + (S_SIZE_T(cSections) * S_SIZE_T(IMAGE_SIZEOF_SECTION_HEADER))); // Check that all sections are within the bounds of the mapped file. 
if (cbEndOfSections.IsOverflow() || cbEndOfSections.Value() > cbFileSize) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } { IMAGE_SECTION_HEADER *pSection = (IMAGE_SECTION_HEADER *)(pbStartOfMappedMem + cbStartOfSections); IMAGE_SECTION_HEADER *pSectionEnd = pSection + cSections; for (; pSection < pSectionEnd; pSection++) { if (strcmp(".rsrc$01", (char *)pSection->Name) == 0) { rsrc[0] = pSection; } else if (strcmp(".rsrc$02", (char *)pSection->Name) == 0) { rsrc[1] = pSection; } } } } // If we don't have both resources, fail. if (!rsrc[0] || !rsrc[1]) { //dbprintf("Invalid .res file: %S: Missing sections .rsrc$01 or .rsrc$02\n", szResFileName); pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } // Verify the resource data starts and sizes { cbTotalSizeOfRawData = S_SIZE_T(0); for (int i = 0; i < 2; i++) { S_SIZE_T cbStartOfResourceData(static_cast<SIZE_T>(VAL32(rsrc[i]->PointerToRawData))); S_SIZE_T cbSizeOfResourceData(static_cast<SIZE_T>(VAL32(rsrc[i]->SizeOfRawData))); S_SIZE_T cbEndOfResourceData(cbStartOfResourceData + cbSizeOfResourceData); if (cbEndOfResourceData.IsOverflow() || cbEndOfResourceData.Value() > cbFileSize) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } cbTotalSizeOfRawData += cbSizeOfResourceData; } // Check that the total raw data doesn't overflow. if (cbTotalSizeOfRawData.IsOverflow() || cbTotalSizeOfRawData.Value() > cbFileSize) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } } PESection *rsrcSection; pParam->hr = pParam->genWriter->getPEWriter().getSectionCreate(".rsrc", sdReadOnly, &rsrcSection); if (FAILED(pParam->hr)) goto lDone; rsrcSection->directoryEntry(IMAGE_DIRECTORY_ENTRY_RESOURCE); data = rsrcSection->getBlock(static_cast<unsigned>(cbTotalSizeOfRawData.Value()), 8); if(data == NULL) { pParam->hr = E_OUTOFMEMORY; goto lDone; } // Copy resource header memcpy(data, (char *)pParam->hMod + VAL32(rsrc[0]->PointerToRawData), VAL32(rsrc[0]->SizeOfRawData)); // Map all the relocs in .rsrc$01 using the reloc and symbol tables in the COFF object., cReloc = 0; // Total number of relocs pReloc = NULL; // Reloc table start cSymbol = 0; // Total number of symbols pSymbolTable = NULL; // Symbol table start { // Check that the relocations and symbols lie within the resource cReloc = VAL16(rsrc[0]->NumberOfRelocations); SIZE_T cbStartOfRelocations = static_cast<SIZE_T>(VAL32(rsrc[0]->PointerToRelocations)); S_SIZE_T cbEndOfRelocations(S_SIZE_T(cbStartOfRelocations) + (S_SIZE_T(cReloc) * S_SIZE_T(sizeof(IMAGE_RELOCATION)))); // Verify the number of symbols fit into the resource. 
cSymbol = static_cast<SIZE_T>(VAL32(pParam->hMod->NumberOfSymbols)); SIZE_T cbStartOfSymbolTable = static_cast<SIZE_T>(VAL32(pParam->hMod->PointerToSymbolTable)); S_SIZE_T cbEndOfSymbolTable(S_SIZE_T(cbStartOfSymbolTable) + (S_SIZE_T(cSymbol) * S_SIZE_T(IMAGE_SIZEOF_SYMBOL))); if (cbEndOfRelocations.IsOverflow() || cbEndOfRelocations.Value() > cbFileSize || cbEndOfSymbolTable.IsOverflow() || cbEndOfSymbolTable.Value() > cbFileSize) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } pReloc = (IMAGE_RELOCATION *)(pbStartOfMappedMem + cbStartOfRelocations); pSymbolTable = (IMAGE_SYMBOL *)(pbStartOfMappedMem + cbStartOfSymbolTable); } _ASSERTE(pReloc != NULL && pSymbolTable != NULL); for(SIZE_T iReloc = 0; iReloc < cReloc; iReloc++, pReloc++) { // Ensure this is a valid reloc { S_SIZE_T cbRelocEnd = S_SIZE_T(VAL32(pReloc->VirtualAddress)) + S_SIZE_T(sizeof(DWORD)); if (cbRelocEnd.IsOverflow() || cbRelocEnd.Value() > static_cast<SIZE_T>(VAL32(rsrc[0]->SizeOfRawData))) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } } // index into symbol table, provides address into $02 DWORD iSymbol = VAL32(pReloc->SymbolTableIndex); // Make sure the index is in range if (iSymbol >= cSymbol) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } IMAGE_SYMBOL* pSymbolEntry = GetSymbolEntry(pSymbolTable, iSymbol); // Ensure the symbol entry is valid for a resource. if ((pSymbolEntry->StorageClass != IMAGE_SYM_CLASS_STATIC) || (VAL16(pSymbolEntry->Type) != IMAGE_SYM_TYPE_NULL) || (VAL16(pSymbolEntry->SectionNumber) != 3)) // 3rd section is .rsrc$02 { //dbprintf("Invalid .res file: %S:Illegal symbol entry\n", szResFileName); pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } // Ensure that RVA is valid address (inside rsrc[1]) if (VAL32(pSymbolEntry->Value) >= VAL32(rsrc[1]->SizeOfRawData)) { //dbprintf("Invalid .res file: %S:Illegal rva into .rsrc$02\n", szResFileName); pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } DWORD dwOffsetInRsrc2 = VAL32(pSymbolEntry->Value) + VAL32(rsrc[0]->SizeOfRawData); // Create reloc *(DWORD*)(data + VAL32(pReloc->VirtualAddress)) = VAL32(dwOffsetInRsrc2); rsrcSection->addSectReloc(pReloc->VirtualAddress, rsrcSection, srRelocAbsolute); } // Copy $02 (resource raw) data memcpy(data+VAL32(rsrc[0]->SizeOfRawData), (char *)pParam->hMod + VAL32(rsrc[1]->PointerToRawData), VAL32(rsrc[1]->SizeOfRawData)); lDone: ; } PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) { //dbprintf("Exception occured manipulating .res file %S\n", szResFileName); param.hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); } PAL_ENDTRY hMod = param.hMod; hFile = param.hFile; szResFileName = param.szResFileName; hr = param.hr; if (hMod != NULL) UnmapViewOfFile(hMod); if (hMap != NULL) CloseHandle(hMap); if (hFile != INVALID_HANDLE_VALUE) CloseHandle(hFile); return hr; } // HRESULT CeeFileGenWriter::emitResourceSection() #endif // !TARGET_UNIX HRESULT CeeFileGenWriter::setManifestEntry(ULONG size, ULONG offset) { if (offset) m_dwManifestRVA = offset; else { CeeSection TextSection = getTextSection(); getMethodRVA(TextSection.dataLen() - size, &m_dwManifestRVA); } m_dwManifestSize = size; return S_OK; } // HRESULT CeeFileGenWriter::setManifestEntry() HRESULT CeeFileGenWriter::setStrongNameEntry(ULONG size, ULONG offset) { m_dwStrongNameRVA = offset; m_dwStrongNameSize = size; return S_OK; } // HRESULT CeeFileGenWriter::setStrongNameEntry() HRESULT 
CeeFileGenWriter::setVTableEntry64(ULONG size, void* ptr) { if (ptr && size) { void * pv; CeeSection TextSection = getTextSection(); // make it DWORD-aligned ULONG L = TextSection.dataLen(); if((L &= ((ULONG)sizeof(DWORD)-1))) { L = (ULONG)sizeof(DWORD) - L; if((pv = TextSection.getBlock(L))) memset(pv,0,L); else return E_OUTOFMEMORY; } getMethodRVA(TextSection.dataLen(), &m_dwVTableRVA); if((pv = TextSection.getBlock(size))) { memcpy(pv,ptr,size); } else return E_OUTOFMEMORY; m_dwVTableSize = size; } return S_OK; } // HRESULT CeeFileGenWriter::setVTableEntry() HRESULT CeeFileGenWriter::setVTableEntry(ULONG size, ULONG offset) { return setVTableEntry64(size,(void*)(ULONG_PTR)offset); } // HRESULT CeeFileGenWriter::setVTableEntry() HRESULT CeeFileGenWriter::computeSectionOffset(CeeSection &section, _In_ char *ptr, unsigned *offset) { *offset = section.computeOffset(ptr); return S_OK; } // HRESULT CeeFileGenWriter::computeSectionOffset() HRESULT CeeFileGenWriter::computeOffset(_In_ char *ptr, CeeSection **pSection, unsigned *offset) { TESTANDRETURNPOINTER(pSection); CeeSection **s = m_sections; CeeSection **sEnd = s + m_numSections; while (s < sEnd) { if ((*s)->containsPointer(ptr)) { *pSection = *s; *offset = (*s)->computeOffset(ptr); return S_OK; } s++; } return E_FAIL; } // HRESULT CeeFileGenWriter::computeOffset() HRESULT CeeFileGenWriter::getCorHeader(IMAGE_COR20_HEADER **ppHeader) { *ppHeader = m_corHeader; return S_OK; } // HRESULT CeeFileGenWriter::getCorHeader()
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Derived class from CCeeGen which handles writing out // the exe. All references to PEWriter pulled out of CCeeGen, // and moved here // // #include "stdafx.h" #include <string.h> #include <limits.h> #include "corerror.h" #include <posterror.h> #include <shlwapi.h> // The following block contains a template for the default entry point stubs of a COM+ // IL only program. One can emit these stubs (with some fix-ups) and make // the code supplied the entry point value for the image. The fix-ups will // in turn cause mscoree.dll to be loaded and the correct entry point to be // called. // // Note: Although these stubs contain x86 specific code, they are used // for all platforms //***************************************************************************** // This stub is designed for a x86 Windows application. It will call the // _CorExeMain function in mscoree.dll. This entry point will in turn load // and run the IL program. // // jump _CorExeMain(); // // The code jumps to the imported function _CorExeMain using the iat. // The address in the template is address of the iat entry which is // fixed up by the loader when the image is paged in. //***************************************************************************** const BYTE ExeMainX86Template[] = { // Jump through IAT to _CorExeMain 0xFF, 0x25, // jmp [iat:_CorDllMain entry] 0x00, 0x00, 0x00, 0x00, // address to replace }; #define ExeMainX86TemplateSize sizeof(ExeMainX86Template) #define CorExeMainX86IATOffset 2 //***************************************************************************** // This stub is designed for a x86 Windows application. It will call the // _CorDllMain function in mscoree.dll with with the base entry point for // the loaded DLL. This entry point will in turn load and run the IL program. // // jump _CorDllMain // // The code jumps to the imported function _CorExeMain using the iat. // The address in the template is address of the iat entry which is // fixed up by the loader when the image is paged in. //***************************************************************************** const BYTE DllMainX86Template[] = { // Jump through IAT to CorDllMain 0xFF, 0x25, // jmp [iat:_CorDllMain entry] 0x00, 0x00, 0x00, 0x00, // address to replace }; #define DllMainX86TemplateSize sizeof(DllMainX86Template) #define CorDllMainX86IATOffset 2 //***************************************************************************** // This stub is designed for a AMD64 Windows application. It will call the // _CorExeMain function in mscoree.dll. This entry point will in turn load // and run the IL program. // // mov rax, _CorExeMain(); // jmp [rax] // // The code jumps to the imported function _CorExeMain using the iat. // The address in the template is address of the iat entry which is // fixed up by the loader when the image is paged in. //***************************************************************************** const BYTE ExeMainAMD64Template[] = { // Jump through IAT to _CorExeMain 0x48, 0xA1, // rex.w rex.b mov rax,[following address] 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,//address of iat:_CorExeMain entry 0xFF, 0xE0 // jmp [rax] }; #define ExeMainAMD64TemplateSize sizeof(ExeMainAMD64Template) #define CorExeMainAMD64IATOffset 2 //***************************************************************************** // This stub is designed for a AMD64 Windows application. 
It will call the // _CorDllMain function in mscoree.dll with with the base entry point for // the loaded DLL. This entry point will in turn load and run the IL program. // // mov rax, _CorDllMain(); // jmp [rax] // // The code jumps to the imported function _CorDllMain using the iat. // The address in the template is address of the iat entry which is // fixed up by the loader when the image is paged in. //***************************************************************************** const BYTE DllMainAMD64Template[] = { // Jump through IAT to CorDllMain 0x48, 0xA1, // rex.w rex.b mov rax,[following address] 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,//address of iat:_CorDllMain entry 0xFF, 0xE0 // jmp [rax] }; #define DllMainAMD64TemplateSize sizeof(DllMainAMD64Template) #define CorDllMainAMD64IATOffset 2 //***************************************************************************** // This stub is designed for an ia64 Windows application. It will call the // _CorExeMain function in mscoree.dll. This entry point will in turn load // and run the IL program. // // jump _CorExeMain(); // // The code jumps to the imported function _CorExeMain using the iat. // We set the value of gp to point at the iat table entry for _CorExeMain //***************************************************************************** const BYTE ExeMainIA64Template[] = { // ld8 r9 = [gp] ;; // ld8 r10 = [r9],8 // nop.i ;; // ld8 gp = [r9] // mov b6 = r10 // br.cond.sptk.few b6 // 0x0B, 0x48, 0x00, 0x02, 0x18, 0x10, 0xA0, 0x40, 0x24, 0x30, 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x10, 0x08, 0x00, 0x12, 0x18, 0x10, 0x60, 0x50, 0x04, 0x80, 0x03, 0x00, 0x60, 0x00, 0x80, 0x00 }; #define ExeMainIA64TemplateSize sizeof(ExeMainIA64Template) //***************************************************************************** // This stub is designed for an ia64 Windows application. It will call the // _CorDllMain function in mscoree.dll with with the base entry point for // the loaded DLL. This entry point will in turn load and run the IL program. // // jump _CorDllMain // // The code jumps to the imported function _CorExeMain using the iat. // We set the value of gp to point at the iat table entry for _CorExeMain //***************************************************************************** const BYTE DllMainIA64Template[] = { // ld8 r9 = [gp] ;; // ld8 r10 = [r9],8 // nop.i ;; // ld8 gp = [r9] // mov b6 = r10 // br.cond.sptk.few b6 // 0x0B, 0x48, 0x00, 0x02, 0x18, 0x10, 0xA0, 0x40, 0x24, 0x30, 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x10, 0x08, 0x00, 0x12, 0x18, 0x10, 0x60, 0x50, 0x04, 0x80, 0x03, 0x00, 0x60, 0x00, 0x80, 0x00 }; #define DllMainIA64TemplateSize sizeof(DllMainIA64Template) // Get the Symbol entry given the head and a 0-based index inline IMAGE_SYMBOL* GetSymbolEntry(IMAGE_SYMBOL* pHead, SIZE_T idx) { return (IMAGE_SYMBOL*) (((BYTE*) pHead) + IMAGE_SIZEOF_SYMBOL * idx); } //***************************************************************************** // To get a new instance, call CreateNewInstance() or CreateNewInstanceEx() instead of new //***************************************************************************** HRESULT CeeFileGenWriter::CreateNewInstance(CCeeGen *pCeeFileGenFrom, CeeFileGenWriter* & pGenWriter, DWORD createFlags) { return CreateNewInstanceEx(pCeeFileGenFrom, pGenWriter, createFlags); } // // Seed file is used as the base file. 
The new file data will be "appended" to the seed file // HRESULT CeeFileGenWriter::CreateNewInstanceEx(CCeeGen *pCeeFileGenFrom, CeeFileGenWriter* & pGenWriter, DWORD createFlags, LPCWSTR seedFileName) { HRESULT hr = S_OK; ULONG preallocatedOffset = 0; NewHolder<PEWriter> pPEWriter(NULL); NewHolder<CeeFileGenWriter> pPrivateGenWriter; CeeSection *corHeaderSection = NULL; pPrivateGenWriter = new (nothrow) CeeFileGenWriter; if (pPrivateGenWriter == NULL) IfFailGo(E_OUTOFMEMORY); pPEWriter = new (nothrow) PEWriter; if (pPEWriter == NULL) IfFailGo(E_OUTOFMEMORY); //workaround //What's really the correct thing to be doing here? //HRESULT hr = pPEWriter->Init(pCeeFileGenFrom ? pCeeFileGenFrom->getPESectionMan() : NULL); hr = pPEWriter->Init(NULL, createFlags, seedFileName); IfFailGo(hr); //Create the general PEWriter. pPrivateGenWriter->m_peSectionMan = pPEWriter; hr = pPrivateGenWriter->Init(); // base class member to finish init IfFailGo(hr); if (!seedFileName) // Use base file's preferred base (if present) { if (pPEWriter->isPE32()) { pPrivateGenWriter->setImageBase((DWORD) CEE_IMAGE_BASE_32); // use same default as linker } else { pPrivateGenWriter->setImageBase64((ULONGLONG) CEE_IMAGE_BASE_64); // use same default as linker } } pPrivateGenWriter->setSubsystem(IMAGE_SUBSYSTEM_WINDOWS_CUI, CEE_IMAGE_SUBSYSTEM_MAJOR_VERSION, CEE_IMAGE_SUBSYSTEM_MINOR_VERSION); if (pPEWriter->createCorMainStub()) { hr = pPrivateGenWriter->allocateIAT(); // so the IAT goes out first IfFailGo(hr); } hr = pPrivateGenWriter->allocateCorHeader(); // get COR header near front IfFailGo(hr); //If we were passed a CCeeGen at the beginning, copy it's data now. if (pCeeFileGenFrom) { pCeeFileGenFrom->cloneInstance((CCeeGen*)pPrivateGenWriter); } hr = pPrivateGenWriter->getSectionCreate(".text0", sdExecute, &corHeaderSection); IfFailGo(hr); preallocatedOffset = corHeaderSection->dataLen(); // set il RVA to be after the preallocated sections pPEWriter->setIlRva(preallocatedOffset); pPEWriter.SuppressRelease(); pPrivateGenWriter.SuppressRelease(); pGenWriter = pPrivateGenWriter; ErrExit: return hr; } // HRESULT CeeFileGenWriter::CreateNewInstance() CeeFileGenWriter::CeeFileGenWriter() // ctor is protected { m_outputFileName = NULL; m_resourceFileName = NULL; m_dllSwitch = false; m_entryPoint = 0; m_comImageFlags = COMIMAGE_FLAGS_ILONLY; // ceegen PEs don't have native code m_iatOffset = 0; m_dllCount = 0; m_dwManifestSize = 0; m_dwManifestRVA = NULL; m_dwStrongNameSize = 0; m_dwStrongNameRVA = NULL; m_dwVTableSize = 0; m_dwVTableRVA = NULL; m_iDataDlls = NULL; m_linked = false; m_fixed = false; } // CeeFileGenWriter::CeeFileGenWriter() //***************************************************************************** // Cleanup //***************************************************************************** HRESULT CeeFileGenWriter::Cleanup() // virtual { ((PEWriter *)m_peSectionMan)->Cleanup(); // call derived cleanup delete m_peSectionMan; m_peSectionMan = NULL; // so base class won't delete delete[] m_outputFileName; delete[] m_resourceFileName; if (m_iDataDlls) { for (int i=0; i < m_dllCount; i++) { if (m_iDataDlls[i].m_methodName) delete[] m_iDataDlls[i].m_methodName; } delete[] m_iDataDlls; } return CCeeGen::Cleanup(); } // HRESULT CeeFileGenWriter::Cleanup() HRESULT CeeFileGenWriter::link() { HRESULT hr = checkForErrors(); if (! 
SUCCEEDED(hr)) return hr; // Don't set this if SetManifestEntry was not called - zapper sets the // resource directory explicitly if (m_dwManifestSize != 0) { m_corHeader->Resources.VirtualAddress = VAL32(m_dwManifestRVA); m_corHeader->Resources.Size = VAL32(m_dwManifestSize); } if (m_dwStrongNameSize != 0) { m_corHeader->StrongNameSignature.VirtualAddress = VAL32(m_dwStrongNameRVA); m_corHeader->StrongNameSignature.Size = VAL32(m_dwStrongNameSize); } if (m_dwVTableSize != 0) { m_corHeader->VTableFixups.VirtualAddress = VAL32(m_dwVTableRVA); m_corHeader->VTableFixups.Size = VAL32(m_dwVTableSize); } unsigned characteristicsMask = IMAGE_FILE_EXECUTABLE_IMAGE; if (getPEWriter().isPE32()) characteristicsMask |= IMAGE_FILE_32BIT_MACHINE; if (!getPEWriter().isPE32()) characteristicsMask |= IMAGE_FILE_LARGE_ADDRESS_AWARE; getPEWriter().setCharacteristics(characteristicsMask); m_corHeader->cb = VAL32(sizeof(IMAGE_COR20_HEADER)); m_corHeader->MajorRuntimeVersion = VAL16(COR_VERSION_MAJOR); m_corHeader->MinorRuntimeVersion = VAL16(COR_VERSION_MINOR); if (m_dllSwitch) getPEWriter().setCharacteristics(IMAGE_FILE_DLL); m_corHeader->Flags = VAL32(m_comImageFlags); IMAGE_COR20_HEADER_FIELD(*m_corHeader, EntryPointToken) = VAL32(m_entryPoint); _ASSERTE(TypeFromToken(m_entryPoint) == mdtMethodDef || m_entryPoint == mdTokenNil || TypeFromToken(m_entryPoint) == mdtFile); setDirectoryEntry(getCorHeaderSection(), IMAGE_DIRECTORY_ENTRY_COMHEADER, sizeof(IMAGE_COR20_HEADER), m_corHeaderOffset); if ((m_comImageFlags & COMIMAGE_FLAGS_IL_LIBRARY) == 0 && !m_linked) { hr = emitExeMain(); if (FAILED(hr)) return hr; #ifndef TARGET_UNIX hr = emitResourceSection(); if (FAILED(hr)) return hr; #endif } m_linked = true; IfFailRet(getPEWriter().link()); return S_OK; } // HRESULT CeeFileGenWriter::link() HRESULT CeeFileGenWriter::fixup() { HRESULT hr; m_fixed = true; if (!m_linked) IfFailRet(link()); CeeGenTokenMapper *pMapper = getTokenMapper(); // Apply token remaps if there are any. if (! m_fTokenMapSupported && pMapper != NULL) { IMetaDataImport *pImport; hr = pMapper->GetMetaData(&pImport); _ASSERTE(SUCCEEDED(hr)); hr = MapTokens(pMapper, pImport); pImport->Release(); } // remap the entry point if entry point token has been moved if (pMapper != NULL) { mdToken tk = m_entryPoint; pMapper->HasTokenMoved(tk, tk); IMAGE_COR20_HEADER_FIELD(*m_corHeader, EntryPointToken) = VAL32(tk); } IfFailRet(getPEWriter().fixup(pMapper)); return S_OK; } // HRESULT CeeFileGenWriter::fixup() HRESULT CeeFileGenWriter::generateImage(void **ppImage) { HRESULT hr = S_OK; LPCWSTR outputFileName = NULL; #ifndef TARGET_UNIX HANDLE hThreadToken = NULL; // Impersonation is only supported on Win2k and above. if (!OpenThreadToken(GetCurrentThread(), TOKEN_READ | TOKEN_IMPERSONATE, TRUE, &hThreadToken)) { if (GetLastError() != ERROR_NO_TOKEN) { _ASSERTE(!"Failed to get thread token!"); return HRESULT_FROM_GetLastError(); } } if (hThreadToken != NULL) { if (!RevertToSelf()) { _ASSERTE(!"Failed to revert impersonation!"); CloseHandle(hThreadToken); return HRESULT_FROM_GetLastError(); } } #endif // !TARGET_UNIX if (!m_fixed) IfFailGo(fixup()); outputFileName = m_outputFileName; if (! 
outputFileName && ppImage == NULL) { if (m_comImageFlags & COMIMAGE_FLAGS_IL_LIBRARY) outputFileName = W("output.ill"); else if (m_dllSwitch) outputFileName = W("output.dll"); else outputFileName = W("output.exe"); } // output file name and ppImage are mutually exclusive _ASSERTE((NULL == outputFileName && ppImage != NULL) || (outputFileName != NULL && NULL == ppImage)); if (outputFileName != NULL) IfFailGo(getPEWriter().write(outputFileName)); else IfFailGo(getPEWriter().write(ppImage)); ErrExit: #ifndef TARGET_UNIX if (hThreadToken != NULL) { BOOL success = SetThreadToken(NULL, hThreadToken); CloseHandle(hThreadToken); if (!success) { _ASSERTE(!"Failed to reimpersonate!"); hr = HRESULT_FROM_GetLastError(); } } #endif // !TARGET_UNIX return hr; } // HRESULT CeeFileGenWriter::generateImage() HRESULT CeeFileGenWriter::setOutputFileName(_In_ LPWSTR fileName) { if (m_outputFileName) delete[] m_outputFileName; size_t len = wcslen(fileName) + 1; m_outputFileName = (LPWSTR)new (nothrow) WCHAR[len]; TESTANDRETURN(m_outputFileName!=NULL, E_OUTOFMEMORY); wcscpy_s(m_outputFileName, len, fileName); return S_OK; } // HRESULT CeeFileGenWriter::setOutputFileName() HRESULT CeeFileGenWriter::setResourceFileName(_In_ LPWSTR fileName) { if (m_resourceFileName) delete[] m_resourceFileName; size_t len = wcslen(fileName) + 1; m_resourceFileName = (LPWSTR)new (nothrow) WCHAR[len]; TESTANDRETURN(m_resourceFileName!=NULL, E_OUTOFMEMORY); wcscpy_s(m_resourceFileName, len, fileName); return S_OK; } // HRESULT CeeFileGenWriter::setResourceFileName() HRESULT CeeFileGenWriter::setImageBase(size_t imageBase) { _ASSERTE(getPEWriter().isPE32()); getPEWriter().setImageBase32((DWORD)imageBase); return S_OK; } // HRESULT CeeFileGenWriter::setImageBase() HRESULT CeeFileGenWriter::setImageBase64(ULONGLONG imageBase) { _ASSERTE(!getPEWriter().isPE32()); getPEWriter().setImageBase64(imageBase); return S_OK; } // HRESULT CeeFileGenWriter::setImageBase64() HRESULT CeeFileGenWriter::setFileAlignment(ULONG fileAlignment) { getPEWriter().setFileAlignment(fileAlignment); return S_OK; } // HRESULT CeeFileGenWriter::setFileAlignment() HRESULT CeeFileGenWriter::setSubsystem(DWORD subsystem, DWORD major, DWORD minor) { getPEWriter().setSubsystem(subsystem, major, minor); return S_OK; } // HRESULT CeeFileGenWriter::setSubsystem() HRESULT CeeFileGenWriter::checkForErrors() { if (TypeFromToken(m_entryPoint) == mdtMethodDef) { if (m_dllSwitch) { //current spec would need to check the binary sig of the entry point method } return S_OK; } return S_OK; } // HRESULT CeeFileGenWriter::checkForErrors() HRESULT CeeFileGenWriter::getMethodRVA(ULONG codeOffset, ULONG *codeRVA) { _ASSERTE(codeRVA); *codeRVA = getPEWriter().getIlRva() + codeOffset; return S_OK; } // HRESULT CeeFileGenWriter::getMethodRVA() HRESULT CeeFileGenWriter::setDirectoryEntry(CeeSection &section, ULONG entry, ULONG size, ULONG offset) { return getPEWriter().setDirectoryEntry((PEWriterSection*)(&section.getImpl()), entry, size, offset); } // HRESULT CeeFileGenWriter::setDirectoryEntry() HRESULT CeeFileGenWriter::getFileTimeStamp(DWORD *pTimeStamp) { return getPEWriter().getFileTimeStamp(pTimeStamp); } // HRESULT CeeFileGenWriter::getFileTimeStamp() HRESULT CeeFileGenWriter::setAddrReloc(UCHAR *instrAddr, DWORD value) { *(DWORD *)instrAddr = VAL32(value); return S_OK; } // HRESULT CeeFileGenWriter::setAddrReloc() HRESULT CeeFileGenWriter::addAddrReloc(CeeSection &thisSection, UCHAR *instrAddr, DWORD offset, CeeSection *targetSection) { if (!targetSection) { 
thisSection.addBaseReloc(offset, srRelocHighLow); } else { thisSection.addSectReloc(offset, *targetSection, srRelocHighLow); } return S_OK; } // HRESULT CeeFileGenWriter::addAddrReloc() // create CorExeMain and import directory into .text and the .iat into .data // // The structure of the import directory information is as follows, but it is not contiguous in // section. All the r/o data goes into the .text section and the iat array (which the loader // updates with the imported addresses) goes into the .data section because WINCE needs it to be writable. // // struct IData { // // one for each DLL, terminating in NULL // IMAGE_IMPORT_DESCRIPTOR iid[]; // // import lookup table: a set of entries for the methods of each DLL, // // terminating each set with NULL // IMAGE_THUNK_DATA32/64 ilt[]; // // hint/name table: an set of entries for each method of each DLL wiht // // no terminating entry // struct { // WORD Hint; // // null terminated string // BYTE Name[]; // } ibn; // Hint/name table // // import address table: a set of entries for the methods of each DLL, // // terminating each set with NULL // IMAGE_THUNK_DATA32/64 iat[]; // // one for each DLL, null terminated strings // BYTE DllName[]; // }; // // IAT must be first in its section, so have code here to allocate it up front // prior to knowing other info such as if dll or not. This won't work if have > 1 // function imported, but we'll burn that bridge when we get to it. HRESULT CeeFileGenWriter::allocateIAT() { m_dllCount = 1; m_iDataDlls = new (nothrow) IDataDllInfo[m_dllCount]; if (m_iDataDlls == NULL) { return E_OUTOFMEMORY; } memset(m_iDataDlls, '\0', m_dllCount * sizeof(IDataDllInfo)); m_iDataDlls[0].m_name = "mscoree.dll"; m_iDataDlls[0].m_numMethods = 1; m_iDataDlls[0].m_methodName = new (nothrow) const char*[m_iDataDlls[0].m_numMethods]; if (! m_iDataDlls[0].m_methodName) { return E_OUTOFMEMORY; } m_iDataDlls[0].m_methodName[0] = NULL; int iDataSizeIAT = 0; for (int i=0; i < m_dllCount; i++) { m_iDataDlls[i].m_iatOffset = iDataSizeIAT; iDataSizeIAT += (m_iDataDlls[i].m_numMethods + 1) * (getPEWriter().isPE32() ? sizeof(IMAGE_THUNK_DATA32) : sizeof(IMAGE_THUNK_DATA64)); } HRESULT hr = getSectionCreate(".text0", sdExecute, &m_iDataSectionIAT); TESTANDRETURNHR(hr); m_iDataOffsetIAT = m_iDataSectionIAT->dataLen(); _ASSERTE(m_iDataOffsetIAT == 0); m_iDataIAT = m_iDataSectionIAT->getBlock(iDataSizeIAT); if (! m_iDataIAT) { return E_OUTOFMEMORY; } memset(m_iDataIAT, '\0', iDataSizeIAT); // Don't set the IAT directory entry yet, since we may not actually end up doing // an emitExeMain. return S_OK; } // HRESULT CeeFileGenWriter::allocateIAT() HRESULT CeeFileGenWriter::emitExeMain() { if (m_dllCount == 0) return S_OK; // Note: code later on in this method assumes that mscoree.dll is at // index m_iDataDlls[0], with CorDllMain or CorExeMain at method[0] _ASSERTE(getPEWriter().createCorMainStub()); if (m_dllSwitch) { m_iDataDlls[0].m_methodName[0] = "_CorDllMain"; } else { m_iDataDlls[0].m_methodName[0] = "_CorExeMain"; } // IMAGE_IMPORT_DESCRIPTOR on PE/PE+ must be 4-byte or 8-byte aligned int align = (getPEWriter().isPE32()) ? 
4 : 8; int curOffset = getTextSection().dataLen(); int diff = ((curOffset + align -1) & ~(align-1)) - curOffset; if (diff) { char* pDiff = getTextSection().getBlock(diff); if (NULL==pDiff) return E_OUTOFMEMORY; memset(pDiff,0,diff); } int iDataSizeRO = (m_dllCount + 1) * sizeof(IMAGE_IMPORT_DESCRIPTOR); CeeSection &iDataSectionRO = getTextSection(); int iDataOffsetRO = iDataSectionRO.dataLen(); int iDataSizeIAT = 0; int i; for (i=0; i < m_dllCount; i++) { m_iDataDlls[i].m_iltOffset = iDataSizeRO + iDataSizeIAT; iDataSizeIAT += (m_iDataDlls[i].m_numMethods + 1) * (getPEWriter().isPE32() ? sizeof(IMAGE_THUNK_DATA32) : sizeof(IMAGE_THUNK_DATA64)); } iDataSizeRO += iDataSizeIAT; for (i=0; i < m_dllCount; i++) { int delta = (iDataSizeRO + iDataOffsetRO) % 16; // make sure is on a 16-byte offset if (delta != 0) iDataSizeRO += (16 - delta); _ASSERTE((iDataSizeRO + iDataOffsetRO) % 16 == 0); m_iDataDlls[i].m_ibnOffset = iDataSizeRO; for (int j=0; j < m_iDataDlls[i].m_numMethods; j++) { int nameLen = (int)(strlen(m_iDataDlls[i].m_methodName[j]) + 1); iDataSizeRO += sizeof(WORD) + nameLen + nameLen%2; } } for (i=0; i < m_dllCount; i++) { m_iDataDlls[i].m_nameOffset = iDataSizeRO; iDataSizeRO += (int)(strlen(m_iDataDlls[i].m_name) + 2); } char *iDataRO = iDataSectionRO.getBlock(iDataSizeRO); if (!iDataRO) return E_OUTOFMEMORY; memset(iDataRO, '\0', iDataSizeRO); setDirectoryEntry(iDataSectionRO, IMAGE_DIRECTORY_ENTRY_IMPORT, iDataSizeRO, iDataOffsetRO); IMAGE_IMPORT_DESCRIPTOR *iid = (IMAGE_IMPORT_DESCRIPTOR *)iDataRO; for (i=0; i < m_dllCount; i++) { // fill in the import descriptors for each DLL IMAGE_IMPORT_DESC_FIELD(iid[i], OriginalFirstThunk) = VAL32((ULONG)(m_iDataDlls[i].m_iltOffset + iDataOffsetRO)); iid[i].Name = VAL32(m_iDataDlls[i].m_nameOffset + iDataOffsetRO); iid[i].FirstThunk = VAL32((ULONG)(m_iDataDlls[i].m_iatOffset + m_iDataOffsetIAT)); iDataSectionRO.addSectReloc( (unsigned)(iDataOffsetRO + (char *)(&IMAGE_IMPORT_DESC_FIELD(iid[i], OriginalFirstThunk)) - iDataRO), iDataSectionRO, srRelocAbsolute); iDataSectionRO.addSectReloc( (unsigned)(iDataOffsetRO + (char *)(&iid[i].Name) - iDataRO), iDataSectionRO, srRelocAbsolute); iDataSectionRO.addSectReloc( (unsigned)(iDataOffsetRO + (char *)(&iid[i].FirstThunk) - iDataRO), *m_iDataSectionIAT, srRelocAbsolute); if (getPEWriter().isPE32()) { // now fill in the import lookup table for each DLL IMAGE_THUNK_DATA32 *ilt = (IMAGE_THUNK_DATA32*) (iDataRO + m_iDataDlls[i].m_iltOffset); IMAGE_THUNK_DATA32 *iat = (IMAGE_THUNK_DATA32*) (m_iDataIAT + m_iDataDlls[i].m_iatOffset); int ibnOffset = m_iDataDlls[i].m_ibnOffset; for (int j=0; j < m_iDataDlls[i].m_numMethods; j++) { ilt[j].u1.AddressOfData = VAL32((ULONG)(ibnOffset + iDataOffsetRO)); iat[j].u1.AddressOfData = VAL32((ULONG)(ibnOffset + iDataOffsetRO)); iDataSectionRO.addSectReloc( (unsigned)(iDataOffsetRO + (char *)(&ilt[j].u1.AddressOfData) - iDataRO), iDataSectionRO, srRelocAbsolute); m_iDataSectionIAT->addSectReloc( (unsigned)(m_iDataOffsetIAT + (char *)(&iat[j].u1.AddressOfData) - m_iDataIAT), iDataSectionRO, srRelocAbsolute); int nameLen = (int)(strlen(m_iDataDlls[i].m_methodName[j]) + 1); memcpy(iDataRO + ibnOffset + offsetof(IMAGE_IMPORT_BY_NAME, Name), m_iDataDlls[i].m_methodName[j], nameLen); ibnOffset += sizeof(WORD) + nameLen + nameLen%2; } } else { // now fill in the import lookup table for each DLL IMAGE_THUNK_DATA64 *ilt = (IMAGE_THUNK_DATA64*) (iDataRO + m_iDataDlls[i].m_iltOffset); IMAGE_THUNK_DATA64 *iat = (IMAGE_THUNK_DATA64*) (m_iDataIAT + m_iDataDlls[i].m_iatOffset); int 
ibnOffset = m_iDataDlls[i].m_ibnOffset; for (int j=0; j < m_iDataDlls[i].m_numMethods; j++) { ilt[j].u1.AddressOfData = VAL64((ULONG)(ibnOffset + iDataOffsetRO)); iat[j].u1.AddressOfData = VAL64((ULONG)(ibnOffset + iDataOffsetRO)); iDataSectionRO.addSectReloc( (unsigned)(iDataOffsetRO + (char *)(&ilt[j].u1.AddressOfData) - iDataRO), iDataSectionRO, srRelocAbsolute); m_iDataSectionIAT->addSectReloc( (unsigned)(m_iDataOffsetIAT + (char *)(&iat[j].u1.AddressOfData) - m_iDataIAT), iDataSectionRO, srRelocAbsolute); int nameLen = (int)(strlen(m_iDataDlls[i].m_methodName[j]) + 1); memcpy(iDataRO + ibnOffset + offsetof(IMAGE_IMPORT_BY_NAME, Name), m_iDataDlls[i].m_methodName[j], nameLen); ibnOffset += sizeof(WORD) + nameLen + nameLen%2; } } // now fill in the import lookup table for each DLL strcpy_s(iDataRO + m_iDataDlls[i].m_nameOffset, iDataSizeRO - m_iDataDlls[i].m_nameOffset, m_iDataDlls[i].m_name); } // end of for loop i < m_dllCount if (getPEWriter().isI386()) { // Put the entry point code into the PE file unsigned entryPointOffset = getTextSection().dataLen(); int iatOffset = (int) (entryPointOffset + (m_dllSwitch ? CorDllMainX86IATOffset : CorExeMainX86IATOffset)); align = 4; // x86 fixups must be 4-byte aligned // The IAT offset must be aligned because fixup is applied to it. diff = ((iatOffset + align -1) & ~(align-1)) - iatOffset; if (diff) { char* pDiff = getTextSection().getBlock(diff); if(NULL==pDiff) return E_OUTOFMEMORY; memset(pDiff,0,diff); entryPointOffset += diff; } _ASSERTE((getTextSection().dataLen() + (m_dllSwitch ? CorDllMainX86IATOffset : CorExeMainX86IATOffset)) % align == 0); getPEWriter().setEntryPointTextOffset(entryPointOffset); if (m_dllSwitch) { UCHAR *dllMainBuf = (UCHAR*)getTextSection().getBlock(sizeof(DllMainX86Template)); if(dllMainBuf==NULL) return E_OUTOFMEMORY; memcpy(dllMainBuf, DllMainX86Template, sizeof(DllMainX86Template)); //mscoree.dll setAddrReloc(dllMainBuf+CorDllMainX86IATOffset, m_iDataDlls[0].m_iatOffset + m_iDataOffsetIAT); addAddrReloc(getTextSection(), dllMainBuf, entryPointOffset+CorDllMainX86IATOffset, m_iDataSectionIAT); } else { UCHAR *exeMainBuf = (UCHAR*)getTextSection().getBlock(sizeof(ExeMainX86Template)); if(exeMainBuf==NULL) return E_OUTOFMEMORY; memcpy(exeMainBuf, ExeMainX86Template, sizeof(ExeMainX86Template)); //mscoree.dll setAddrReloc(exeMainBuf+CorExeMainX86IATOffset, m_iDataDlls[0].m_iatOffset + m_iDataOffsetIAT); addAddrReloc(getTextSection(), exeMainBuf, entryPointOffset+CorExeMainX86IATOffset, m_iDataSectionIAT); } } else if (getPEWriter().isAMD64()) { // Put the entry point code into the PE file unsigned entryPointOffset = getTextSection().dataLen(); int iatOffset = (int) (entryPointOffset + (m_dllSwitch ? CorDllMainAMD64IATOffset : CorExeMainAMD64IATOffset)); align = 16; // AMD64 fixups must be 8-byte aligned // The IAT offset must be aligned because fixup is applied to it. diff = ((iatOffset + align -1) & ~(align-1)) - iatOffset; if (diff) { char* pDiff = getTextSection().getBlock(diff); if(NULL==pDiff) return E_OUTOFMEMORY; memset(pDiff,0,diff); entryPointOffset += diff; } _ASSERTE((getTextSection().dataLen() + (m_dllSwitch ? 
CorDllMainAMD64IATOffset : CorExeMainAMD64IATOffset)) % align == 0); getPEWriter().setEntryPointTextOffset(entryPointOffset); if (m_dllSwitch) { UCHAR *dllMainBuf = (UCHAR*)getTextSection().getBlock(sizeof(DllMainAMD64Template)); if(dllMainBuf==NULL) return E_OUTOFMEMORY; memcpy(dllMainBuf, DllMainAMD64Template, sizeof(DllMainAMD64Template)); //mscoree.dll setAddrReloc(dllMainBuf+CorDllMainAMD64IATOffset, m_iDataDlls[0].m_iatOffset + m_iDataOffsetIAT); addAddrReloc(getTextSection(), dllMainBuf, entryPointOffset+CorDllMainAMD64IATOffset, m_iDataSectionIAT); } else { UCHAR *exeMainBuf = (UCHAR*)getTextSection().getBlock(sizeof(ExeMainAMD64Template)); if(exeMainBuf==NULL) return E_OUTOFMEMORY; memcpy(exeMainBuf, ExeMainAMD64Template, sizeof(ExeMainAMD64Template)); //mscoree.dll setAddrReloc(exeMainBuf+CorExeMainAMD64IATOffset, m_iDataDlls[0].m_iatOffset + m_iDataOffsetIAT); addAddrReloc(getTextSection(), exeMainBuf, entryPointOffset+CorExeMainAMD64IATOffset, m_iDataSectionIAT); } } else if (getPEWriter().isIA64()) { // Must have a PE+ PE64 file //_ASSERTE(!getPEWriter().isPE32()); // Put the entry point code into the PE+ file curOffset = getTextSection().dataLen(); align = 16; // instructions on ia64 must be 16-byte aligned // The entry point address be aligned diff = ((curOffset + align -1) & ~(align-1)) - curOffset; if (diff) { char* pDiff = getTextSection().getBlock(diff); if(NULL==pDiff) return E_OUTOFMEMORY; memset(pDiff,0,diff); } unsigned entryPointOffset = getTextSection().dataLen(); if (m_dllSwitch) { UCHAR *dllMainBuf = (UCHAR*)getTextSection().getBlock(sizeof(DllMainIA64Template)); if (dllMainBuf==NULL) return E_OUTOFMEMORY; memcpy(dllMainBuf, DllMainIA64Template, sizeof(DllMainIA64Template)); } else { UCHAR *exeMainBuf = (UCHAR*)getTextSection().getBlock(sizeof(ExeMainIA64Template)); if (exeMainBuf==NULL) return E_OUTOFMEMORY; memcpy(exeMainBuf, ExeMainIA64Template, sizeof(ExeMainIA64Template)); } // Put the entry point function pointer into the PE file unsigned entryPlabelOffset = getTextSection().dataLen(); getPEWriter().setEntryPointTextOffset(entryPlabelOffset); UCHAR * entryPtr = (UCHAR*)getTextSection().getBlock(sizeof(ULONGLONG)); UCHAR * gpPtr = (UCHAR*)getTextSection().getBlock(sizeof(ULONGLONG)); memset(entryPtr,0,sizeof(ULONGLONG)); memset(gpPtr,0,sizeof(ULONGLONG)); setAddrReloc(entryPtr, entryPointOffset); addAddrReloc(getTextSection(), entryPtr, entryPlabelOffset, &getTextSection()); setAddrReloc(gpPtr, m_iDataDlls[0].m_iatOffset + m_iDataOffsetIAT); addAddrReloc(getTextSection(), gpPtr, entryPlabelOffset+8, m_iDataSectionIAT); } else { _ASSERTE(!"Unknown target machine"); } // Now set our IAT entry since we're using the IAT setDirectoryEntry(*m_iDataSectionIAT, IMAGE_DIRECTORY_ENTRY_IAT, iDataSizeIAT, m_iDataOffsetIAT); return S_OK; } // HRESULT CeeFileGenWriter::emitExeMain() #ifndef TARGET_UNIX // This function reads a resource file and emits it into the generated PE file. // 1. We can only link resources in obj format. Must convert from .res to .obj // with CvtRes.exe. See https://github.com/dotnet/runtime/issues/11412. // 2. 
Must touch up all COFF relocs from .rsrc$01 (resource header) to .rsrc$02 // (resource raw data) HRESULT CeeFileGenWriter::emitResourceSection() { if (m_resourceFileName == NULL) return S_OK; const WCHAR* szResFileName = m_resourceFileName; // read the resource file and spit it out in the .rsrc section HANDLE hFile = INVALID_HANDLE_VALUE; HANDLE hMap = NULL; IMAGE_FILE_HEADER *hMod = NULL; HRESULT hr = S_OK; struct Param { HANDLE hFile; HANDLE hMap; IMAGE_FILE_HEADER *hMod; const WCHAR* szResFileName; CeeFileGenWriter *genWriter; HRESULT hr; } param; param.hFile = hFile; param.hMap = hMap; param.hMod = hMod; param.szResFileName = szResFileName; param.genWriter = this; param.hr = S_OK; PAL_TRY(Param *, pParam, &param) { SIZE_T cbFileSize; const BYTE *pbStartOfMappedMem; IMAGE_SECTION_HEADER *rsrc[2] = { NULL, NULL }; S_SIZE_T cbTotalSizeOfRawData; char *data = NULL; SIZE_T cReloc = 0; IMAGE_RELOCATION *pReloc = NULL; SIZE_T cSymbol = 0; IMAGE_SYMBOL *pSymbolTable = NULL; // create a mapped view of the .res file pParam->hFile = WszCreateFile(pParam->szResFileName, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); if (pParam->hFile == INVALID_HANDLE_VALUE) { //dbprintf("Resource file %S not found\n", szResFileName); pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } // Grab the file size for verification checks. { DWORD dwFileSizeHigh; DWORD dwFileSize = SafeGetFileSize(pParam->hFile, &dwFileSizeHigh); if (dwFileSize == (DWORD)(-1)) { pParam->hr = HRESULT_FROM_GetLastError(); goto lDone; } // Since we intend to memory map this file, the size of the file can not need 64 bits to represent! if (dwFileSizeHigh != 0) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } cbFileSize = static_cast<SIZE_T>(dwFileSize); } pParam->hMap = WszCreateFileMapping(pParam->hFile, 0, PAGE_READONLY, 0, 0, NULL); if (pParam->hMap == NULL) { //dbprintf("Invalid .res file: %S\n", szResFileName); pParam->hr = HRESULT_FROM_GetLastError(); goto lDone; } pbStartOfMappedMem = reinterpret_cast<const BYTE *>(MapViewOfFile(pParam->hMap, FILE_MAP_READ, 0, 0, 0)); // test failure conditions if (pbStartOfMappedMem == NULL) { //dbprintf("Invalid .res file: %S:Can't get header\n", szResFileName); pParam->hr = HRESULT_FROM_GetLastError(); goto lDone; } // Check that the file contains an IMAGE_FILE_HEADER structure. if (IMAGE_SIZEOF_FILE_HEADER > cbFileSize) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } pParam->hMod = (IMAGE_FILE_HEADER*)pbStartOfMappedMem; if (VAL16(pParam->hMod->SizeOfOptionalHeader) != 0) { //dbprintf("Invalid .res file: %S:Illegal optional header\n", szResFileName); pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); // GetLastError() = 0 since API worked. goto lDone; } // Scan all section headers and grab .rsrc$01 and .rsrc$02 { // First section is directly after header SIZE_T cSections = static_cast<SIZE_T>(VAL16(pParam->hMod->NumberOfSections)); SIZE_T cbStartOfSections = IMAGE_SIZEOF_FILE_HEADER; S_SIZE_T cbEndOfSections(S_SIZE_T(cbStartOfSections) + (S_SIZE_T(cSections) * S_SIZE_T(IMAGE_SIZEOF_SECTION_HEADER))); // Check that all sections are within the bounds of the mapped file. 
if (cbEndOfSections.IsOverflow() || cbEndOfSections.Value() > cbFileSize) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } { IMAGE_SECTION_HEADER *pSection = (IMAGE_SECTION_HEADER *)(pbStartOfMappedMem + cbStartOfSections); IMAGE_SECTION_HEADER *pSectionEnd = pSection + cSections; for (; pSection < pSectionEnd; pSection++) { if (strcmp(".rsrc$01", (char *)pSection->Name) == 0) { rsrc[0] = pSection; } else if (strcmp(".rsrc$02", (char *)pSection->Name) == 0) { rsrc[1] = pSection; } } } } // If we don't have both resources, fail. if (!rsrc[0] || !rsrc[1]) { //dbprintf("Invalid .res file: %S: Missing sections .rsrc$01 or .rsrc$02\n", szResFileName); pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } // Verify the resource data starts and sizes { cbTotalSizeOfRawData = S_SIZE_T(0); for (int i = 0; i < 2; i++) { S_SIZE_T cbStartOfResourceData(static_cast<SIZE_T>(VAL32(rsrc[i]->PointerToRawData))); S_SIZE_T cbSizeOfResourceData(static_cast<SIZE_T>(VAL32(rsrc[i]->SizeOfRawData))); S_SIZE_T cbEndOfResourceData(cbStartOfResourceData + cbSizeOfResourceData); if (cbEndOfResourceData.IsOverflow() || cbEndOfResourceData.Value() > cbFileSize) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } cbTotalSizeOfRawData += cbSizeOfResourceData; } // Check that the total raw data doesn't overflow. if (cbTotalSizeOfRawData.IsOverflow() || cbTotalSizeOfRawData.Value() > cbFileSize) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } } PESection *rsrcSection; pParam->hr = pParam->genWriter->getPEWriter().getSectionCreate(".rsrc", sdReadOnly, &rsrcSection); if (FAILED(pParam->hr)) goto lDone; rsrcSection->directoryEntry(IMAGE_DIRECTORY_ENTRY_RESOURCE); data = rsrcSection->getBlock(static_cast<unsigned>(cbTotalSizeOfRawData.Value()), 8); if(data == NULL) { pParam->hr = E_OUTOFMEMORY; goto lDone; } // Copy resource header memcpy(data, (char *)pParam->hMod + VAL32(rsrc[0]->PointerToRawData), VAL32(rsrc[0]->SizeOfRawData)); // Map all the relocs in .rsrc$01 using the reloc and symbol tables in the COFF object., cReloc = 0; // Total number of relocs pReloc = NULL; // Reloc table start cSymbol = 0; // Total number of symbols pSymbolTable = NULL; // Symbol table start { // Check that the relocations and symbols lie within the resource cReloc = VAL16(rsrc[0]->NumberOfRelocations); SIZE_T cbStartOfRelocations = static_cast<SIZE_T>(VAL32(rsrc[0]->PointerToRelocations)); S_SIZE_T cbEndOfRelocations(S_SIZE_T(cbStartOfRelocations) + (S_SIZE_T(cReloc) * S_SIZE_T(sizeof(IMAGE_RELOCATION)))); // Verify the number of symbols fit into the resource. 
cSymbol = static_cast<SIZE_T>(VAL32(pParam->hMod->NumberOfSymbols)); SIZE_T cbStartOfSymbolTable = static_cast<SIZE_T>(VAL32(pParam->hMod->PointerToSymbolTable)); S_SIZE_T cbEndOfSymbolTable(S_SIZE_T(cbStartOfSymbolTable) + (S_SIZE_T(cSymbol) * S_SIZE_T(IMAGE_SIZEOF_SYMBOL))); if (cbEndOfRelocations.IsOverflow() || cbEndOfRelocations.Value() > cbFileSize || cbEndOfSymbolTable.IsOverflow() || cbEndOfSymbolTable.Value() > cbFileSize) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } pReloc = (IMAGE_RELOCATION *)(pbStartOfMappedMem + cbStartOfRelocations); pSymbolTable = (IMAGE_SYMBOL *)(pbStartOfMappedMem + cbStartOfSymbolTable); } _ASSERTE(pReloc != NULL && pSymbolTable != NULL); for(SIZE_T iReloc = 0; iReloc < cReloc; iReloc++, pReloc++) { // Ensure this is a valid reloc { S_SIZE_T cbRelocEnd = S_SIZE_T(VAL32(pReloc->VirtualAddress)) + S_SIZE_T(sizeof(DWORD)); if (cbRelocEnd.IsOverflow() || cbRelocEnd.Value() > static_cast<SIZE_T>(VAL32(rsrc[0]->SizeOfRawData))) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } } // index into symbol table, provides address into $02 DWORD iSymbol = VAL32(pReloc->SymbolTableIndex); // Make sure the index is in range if (iSymbol >= cSymbol) { pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } IMAGE_SYMBOL* pSymbolEntry = GetSymbolEntry(pSymbolTable, iSymbol); // Ensure the symbol entry is valid for a resource. if ((pSymbolEntry->StorageClass != IMAGE_SYM_CLASS_STATIC) || (VAL16(pSymbolEntry->Type) != IMAGE_SYM_TYPE_NULL) || (VAL16(pSymbolEntry->SectionNumber) != 3)) // 3rd section is .rsrc$02 { //dbprintf("Invalid .res file: %S:Illegal symbol entry\n", szResFileName); pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } // Ensure that RVA is valid address (inside rsrc[1]) if (VAL32(pSymbolEntry->Value) >= VAL32(rsrc[1]->SizeOfRawData)) { //dbprintf("Invalid .res file: %S:Illegal rva into .rsrc$02\n", szResFileName); pParam->hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); goto lDone; } DWORD dwOffsetInRsrc2 = VAL32(pSymbolEntry->Value) + VAL32(rsrc[0]->SizeOfRawData); // Create reloc *(DWORD*)(data + VAL32(pReloc->VirtualAddress)) = VAL32(dwOffsetInRsrc2); rsrcSection->addSectReloc(pReloc->VirtualAddress, rsrcSection, srRelocAbsolute); } // Copy $02 (resource raw) data memcpy(data+VAL32(rsrc[0]->SizeOfRawData), (char *)pParam->hMod + VAL32(rsrc[1]->PointerToRawData), VAL32(rsrc[1]->SizeOfRawData)); lDone: ; } PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) { //dbprintf("Exception occured manipulating .res file %S\n", szResFileName); param.hr = HRESULT_FROM_WIN32(ERROR_RESOURCE_DATA_NOT_FOUND); } PAL_ENDTRY hMod = param.hMod; hFile = param.hFile; szResFileName = param.szResFileName; hr = param.hr; if (hMod != NULL) UnmapViewOfFile(hMod); if (hMap != NULL) CloseHandle(hMap); if (hFile != INVALID_HANDLE_VALUE) CloseHandle(hFile); return hr; } // HRESULT CeeFileGenWriter::emitResourceSection() #endif // !TARGET_UNIX HRESULT CeeFileGenWriter::setManifestEntry(ULONG size, ULONG offset) { if (offset) m_dwManifestRVA = offset; else { CeeSection TextSection = getTextSection(); getMethodRVA(TextSection.dataLen() - size, &m_dwManifestRVA); } m_dwManifestSize = size; return S_OK; } // HRESULT CeeFileGenWriter::setManifestEntry() HRESULT CeeFileGenWriter::setStrongNameEntry(ULONG size, ULONG offset) { m_dwStrongNameRVA = offset; m_dwStrongNameSize = size; return S_OK; } // HRESULT CeeFileGenWriter::setStrongNameEntry() HRESULT 
CeeFileGenWriter::setVTableEntry64(ULONG size, void* ptr) { if (ptr && size) { void * pv; CeeSection TextSection = getTextSection(); // make it DWORD-aligned ULONG L = TextSection.dataLen(); if((L &= ((ULONG)sizeof(DWORD)-1))) { L = (ULONG)sizeof(DWORD) - L; if((pv = TextSection.getBlock(L))) memset(pv,0,L); else return E_OUTOFMEMORY; } getMethodRVA(TextSection.dataLen(), &m_dwVTableRVA); if((pv = TextSection.getBlock(size))) { memcpy(pv,ptr,size); } else return E_OUTOFMEMORY; m_dwVTableSize = size; } return S_OK; } // HRESULT CeeFileGenWriter::setVTableEntry() HRESULT CeeFileGenWriter::setVTableEntry(ULONG size, ULONG offset) { return setVTableEntry64(size,(void*)(ULONG_PTR)offset); } // HRESULT CeeFileGenWriter::setVTableEntry() HRESULT CeeFileGenWriter::computeSectionOffset(CeeSection &section, _In_ char *ptr, unsigned *offset) { *offset = section.computeOffset(ptr); return S_OK; } // HRESULT CeeFileGenWriter::computeSectionOffset() HRESULT CeeFileGenWriter::computeOffset(_In_ char *ptr, CeeSection **pSection, unsigned *offset) { TESTANDRETURNPOINTER(pSection); CeeSection **s = m_sections; CeeSection **sEnd = s + m_numSections; while (s < sEnd) { if ((*s)->containsPointer(ptr)) { *pSection = *s; *offset = (*s)->computeOffset(ptr); return S_OK; } s++; } return E_FAIL; } // HRESULT CeeFileGenWriter::computeOffset() HRESULT CeeFileGenWriter::getCorHeader(IMAGE_COR20_HEADER **ppHeader) { *ppHeader = m_corHeader; return S_OK; } // HRESULT CeeFileGenWriter::getCorHeader()
-1
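The resource-section parsing in the record above leans on overflow-checked size arithmetic (`S_SIZE_T`, `IsOverflow()`) before every bounds comparison against the mapped file size. Below is a minimal, self-contained sketch of that pattern; `CheckedAdd` and `RangeFitsInFile` are illustrative names, not the runtime's helpers.

```cpp
// Illustrative sketch of the checked-arithmetic pattern used when validating
// offsets read from an untrusted file. Names here are hypothetical; the
// runtime uses its own S_SIZE_T wrapper instead.
#include <cstddef>
#include <limits>

// Returns false instead of wrapping when a + b would overflow size_t.
static bool CheckedAdd(size_t a, size_t b, size_t* out)
{
    if (a > std::numeric_limits<size_t>::max() - b)
        return false;   // overflow: treat as invalid input
    *out = a + b;
    return true;
}

// A range [offset, offset + size) is acceptable only if its end can be
// computed without overflow and lies within the mapped file.
static bool RangeFitsInFile(size_t offset, size_t size, size_t fileSize)
{
    size_t end;
    return CheckedAdd(offset, size, &end) && end <= fileSize;
}
```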
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/mono/mono/utils/strenc.h
/** * \file * string encoding conversions * * Author: * Dick Porter ([email protected]) * * (C) 2003 Ximian, Inc. */ #ifndef _MONO_STRENC_H_ #define _MONO_STRENC_H_ 1 #include <glib.h> #include <mono/utils/mono-publib.h> MONO_API gunichar2 *mono_unicode_from_external (const gchar *in, gsize *bytes); MONO_API gchar *mono_utf8_from_external (const gchar *in); MONO_API gchar *mono_unicode_to_external (const gunichar2 *uni); MONO_API gboolean mono_utf8_validate_and_len (const gchar *source, glong* oLength, const gchar** oEnd); MONO_API gboolean mono_utf8_validate_and_len_with_bounds (const gchar *source, glong max_bytes, glong* oLength, const gchar** oEnd); #endif /* _MONO_STRENC_H_ */
/** * \file * string encoding conversions * * Author: * Dick Porter ([email protected]) * * (C) 2003 Ximian, Inc. */ #ifndef _MONO_STRENC_H_ #define _MONO_STRENC_H_ 1 #include <glib.h> #include <mono/utils/mono-publib.h> MONO_API gunichar2 *mono_unicode_from_external (const gchar *in, gsize *bytes); MONO_API gchar *mono_utf8_from_external (const gchar *in); MONO_API gchar *mono_unicode_to_external (const gunichar2 *uni); MONO_API gboolean mono_utf8_validate_and_len (const gchar *source, glong* oLength, const gchar** oEnd); MONO_API gboolean mono_utf8_validate_and_len_with_bounds (const gchar *source, glong max_bytes, glong* oLength, const gchar** oEnd); #endif /* _MONO_STRENC_H_ */
-1
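The PR description repeated in the metadata above outlines an adaptive patchpoint-placement policy: place patchpoints at backedge sources when the backedge count is small, otherwise at targets, and fall back to targets whenever a source turns out to be unsuitable. The sketch below is only a paraphrase of that policy in code; the types, names, and threshold are hypothetical and do not come from the JIT sources.

```cpp
// Hypothetical sketch of the adaptive placement policy described in the PR text.
// None of these names exist in the JIT; the threshold is invented.
#include <cstddef>

enum class PatchpointSpot { BackedgeSource, BackedgeTarget };

struct Backedge
{
    int  sourceBlock;
    int  targetBlock;
    bool sourceIsSuitable;   // a source block may not admit a patchpoint
};

static PatchpointSpot ChooseSpot(const Backedge& edge, size_t backedgeCount, size_t threshold)
{
    // Adaptive: prefer sources while the number of backedges stays small,
    // but always fall back to the target when the source cannot take one.
    if (backedgeCount <= threshold && edge.sourceIsSuitable)
        return PatchpointSpot::BackedgeSource;
    return PatchpointSpot::BackedgeTarget;
}
```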
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/vm/comsynchronizable.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Header: COMSynchronizable.h ** ** Purpose: Native methods on System.SynchronizableObject ** and its subclasses. ** ** ===========================================================*/ #ifndef _COMSYNCHRONIZABLE_H #define _COMSYNCHRONIZABLE_H #include "field.h" // For FieldDesc definition. // // Each function that we call through native only gets one argument, // which is actually a pointer to its stack of arguments. Our structs // for accessing these are defined below. // struct SharedState; class ThreadNative { friend class ThreadBaseObject; public: enum { PRIORITY_LOWEST = 0, PRIORITY_BELOW_NORMAL = 1, PRIORITY_NORMAL = 2, PRIORITY_ABOVE_NORMAL = 3, PRIORITY_HIGHEST = 4, }; enum { ThreadStopRequested = 1, ThreadSuspendRequested = 2, ThreadBackground = 4, ThreadUnstarted = 8, ThreadStopped = 16, ThreadWaitSleepJoin = 32, ThreadSuspended = 64, ThreadAbortRequested = 128, }; enum { ApartmentSTA = 0, ApartmentMTA = 1, ApartmentUnknown = 2 }; static FCDECL1(INT32, GetPriority, ThreadBaseObject* pThisUNSAFE); static FCDECL2(void, SetPriority, ThreadBaseObject* pThisUNSAFE, INT32 iPriority); static FCDECL1(void, Interrupt, ThreadBaseObject* pThisUNSAFE); static FCDECL1(FC_BOOL_RET, IsAlive, ThreadBaseObject* pThisUNSAFE); static FCDECL2(FC_BOOL_RET, Join, ThreadBaseObject* pThisUNSAFE, INT32 Timeout); #undef Sleep static FCDECL1(void, Sleep, INT32 iTime); #define Sleep(a) Dont_Use_Sleep(a) static FCDECL1(void, Initialize, ThreadBaseObject* pThisUNSAFE); static FCDECL2(void, SetBackground, ThreadBaseObject* pThisUNSAFE, CLR_BOOL isBackground); static FCDECL1(FC_BOOL_RET, IsBackground, ThreadBaseObject* pThisUNSAFE); static FCDECL1(INT32, GetThreadState, ThreadBaseObject* pThisUNSAFE); static FCDECL1(INT32, GetThreadContext, ThreadBaseObject* pThisUNSAFE); #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT static FCDECL1(INT32, GetApartmentState, ThreadBaseObject* pThis); static FCDECL2(INT32, SetApartmentState, ThreadBaseObject* pThisUNSAFE, INT32 iState); #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT static FCDECL1(INT32, GetManagedThreadId, ThreadBaseObject* th); static FCDECL0(INT32, GetOptimalMaxSpinWaitsPerSpinIteration); static FCDECL1(void, SpinWait, int iterations); static FCDECL0(Object*, GetCurrentThread); static FCDECL1(void, Finalize, ThreadBaseObject* pThis); #ifdef FEATURE_COMINTEROP static FCDECL1(void, DisableComObjectEagerCleanup, ThreadBaseObject* pThis); #endif //FEATURE_COMINTEROP static FCDECL1(FC_BOOL_RET,IsThreadpoolThread, ThreadBaseObject* thread); static FCDECL1(void, SetIsThreadpoolThread, ThreadBaseObject* thread); static FCDECL0(INT32, GetCurrentProcessorNumber); static void Start(Thread* pNewThread, int threadStackSize, int priority, PCWSTR pThreadName); static void InformThreadNameChange(Thread* pThread, LPCWSTR name, INT32 len); private: struct KickOffThread_Args { Thread *pThread; SharedState *share; ULONG retVal; }; static void KickOffThread_Worker(LPVOID /* KickOffThread_Args* */); static ULONG WINAPI KickOffThread(void *pass); static BOOL DoJoin(THREADBASEREF DyingThread, INT32 timeout); }; extern "C" void QCALLTYPE ThreadNative_Start(QCall::ThreadHandle thread, int threadStackSize, int priority, PCWSTR pThreadName); extern "C" void QCALLTYPE ThreadNative_UninterruptibleSleep0(); extern "C" void QCALLTYPE ThreadNative_InformThreadNameChange(QCall::ThreadHandle thread, 
LPCWSTR name, INT32 len); extern "C" UINT64 QCALLTYPE ThreadNative_GetProcessDefaultStackSize(); extern "C" BOOL QCALLTYPE ThreadNative_YieldThread(); extern "C" UINT64 QCALLTYPE ThreadNative_GetCurrentOSThreadId(); #endif // _COMSYNCHRONIZABLE_H
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Header: COMSynchronizable.h ** ** Purpose: Native methods on System.SynchronizableObject ** and its subclasses. ** ** ===========================================================*/ #ifndef _COMSYNCHRONIZABLE_H #define _COMSYNCHRONIZABLE_H #include "field.h" // For FieldDesc definition. // // Each function that we call through native only gets one argument, // which is actually a pointer to its stack of arguments. Our structs // for accessing these are defined below. // struct SharedState; class ThreadNative { friend class ThreadBaseObject; public: enum { PRIORITY_LOWEST = 0, PRIORITY_BELOW_NORMAL = 1, PRIORITY_NORMAL = 2, PRIORITY_ABOVE_NORMAL = 3, PRIORITY_HIGHEST = 4, }; enum { ThreadStopRequested = 1, ThreadSuspendRequested = 2, ThreadBackground = 4, ThreadUnstarted = 8, ThreadStopped = 16, ThreadWaitSleepJoin = 32, ThreadSuspended = 64, ThreadAbortRequested = 128, }; enum { ApartmentSTA = 0, ApartmentMTA = 1, ApartmentUnknown = 2 }; static FCDECL1(INT32, GetPriority, ThreadBaseObject* pThisUNSAFE); static FCDECL2(void, SetPriority, ThreadBaseObject* pThisUNSAFE, INT32 iPriority); static FCDECL1(void, Interrupt, ThreadBaseObject* pThisUNSAFE); static FCDECL1(FC_BOOL_RET, IsAlive, ThreadBaseObject* pThisUNSAFE); static FCDECL2(FC_BOOL_RET, Join, ThreadBaseObject* pThisUNSAFE, INT32 Timeout); #undef Sleep static FCDECL1(void, Sleep, INT32 iTime); #define Sleep(a) Dont_Use_Sleep(a) static FCDECL1(void, Initialize, ThreadBaseObject* pThisUNSAFE); static FCDECL2(void, SetBackground, ThreadBaseObject* pThisUNSAFE, CLR_BOOL isBackground); static FCDECL1(FC_BOOL_RET, IsBackground, ThreadBaseObject* pThisUNSAFE); static FCDECL1(INT32, GetThreadState, ThreadBaseObject* pThisUNSAFE); static FCDECL1(INT32, GetThreadContext, ThreadBaseObject* pThisUNSAFE); #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT static FCDECL1(INT32, GetApartmentState, ThreadBaseObject* pThis); static FCDECL2(INT32, SetApartmentState, ThreadBaseObject* pThisUNSAFE, INT32 iState); #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT static FCDECL1(INT32, GetManagedThreadId, ThreadBaseObject* th); static FCDECL0(INT32, GetOptimalMaxSpinWaitsPerSpinIteration); static FCDECL1(void, SpinWait, int iterations); static FCDECL0(Object*, GetCurrentThread); static FCDECL1(void, Finalize, ThreadBaseObject* pThis); #ifdef FEATURE_COMINTEROP static FCDECL1(void, DisableComObjectEagerCleanup, ThreadBaseObject* pThis); #endif //FEATURE_COMINTEROP static FCDECL1(FC_BOOL_RET,IsThreadpoolThread, ThreadBaseObject* thread); static FCDECL1(void, SetIsThreadpoolThread, ThreadBaseObject* thread); static FCDECL0(INT32, GetCurrentProcessorNumber); static void Start(Thread* pNewThread, int threadStackSize, int priority, PCWSTR pThreadName); static void InformThreadNameChange(Thread* pThread, LPCWSTR name, INT32 len); private: struct KickOffThread_Args { Thread *pThread; SharedState *share; ULONG retVal; }; static void KickOffThread_Worker(LPVOID /* KickOffThread_Args* */); static ULONG WINAPI KickOffThread(void *pass); static BOOL DoJoin(THREADBASEREF DyingThread, INT32 timeout); }; extern "C" void QCALLTYPE ThreadNative_Start(QCall::ThreadHandle thread, int threadStackSize, int priority, PCWSTR pThreadName); extern "C" void QCALLTYPE ThreadNative_UninterruptibleSleep0(); extern "C" void QCALLTYPE ThreadNative_InformThreadNameChange(QCall::ThreadHandle thread, 
LPCWSTR name, INT32 len); extern "C" UINT64 QCALLTYPE ThreadNative_GetProcessDefaultStackSize(); extern "C" BOOL QCALLTYPE ThreadNative_YieldThread(); extern "C" UINT64 QCALLTYPE ThreadNative_GetCurrentOSThreadId(); #endif // _COMSYNCHRONIZABLE_H
-1
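ThreadNative in the header above exposes a five-value priority enum (PRIORITY_LOWEST through PRIORITY_HIGHEST). One plausible way such a value reaches the OS is a direct mapping onto the Win32 THREAD_PRIORITY_* constants, as sketched below; this mapping is an assumption for illustration, not the runtime's actual implementation.

```cpp
// Hypothetical mapping from the managed-facing priority values declared in
// ThreadNative to Win32 thread priorities. The real runtime's mapping may differ.
#include <windows.h>

static int ToWin32Priority(int managedPriority)
{
    switch (managedPriority)
    {
        case 0 /* PRIORITY_LOWEST       */: return THREAD_PRIORITY_LOWEST;
        case 1 /* PRIORITY_BELOW_NORMAL */: return THREAD_PRIORITY_BELOW_NORMAL;
        case 3 /* PRIORITY_ABOVE_NORMAL */: return THREAD_PRIORITY_ABOVE_NORMAL;
        case 4 /* PRIORITY_HIGHEST      */: return THREAD_PRIORITY_HIGHEST;
        case 2 /* PRIORITY_NORMAL       */:
        default:                            return THREAD_PRIORITY_NORMAL;
    }
}
```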
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/vm/managedmdimport.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" #include "mlinfo.h" #include "managedmdimport.hpp" #include "wrappers.h" void ThrowMetaDataImportException(HRESULT hr) { WRAPPER_NO_CONTRACT; if (hr == CLDB_E_RECORD_NOTFOUND) return; MethodDescCallSite throwError(METHOD__METADATA_IMPORT__THROW_ERROR); ARG_SLOT args[] = { (ARG_SLOT)hr }; throwError.Call(args); } // // MetaDataImport // extern BOOL ParseNativeTypeInfo(NativeTypeParamInfo* pInfo, PCCOR_SIGNATURE pvNativeType, ULONG cbNativeType); FCIMPL11(void, MetaDataImport::GetMarshalAs, BYTE* pvNativeType, ULONG cbNativeType, INT32* unmanagedType, INT32* safeArraySubType, STRINGREF* safeArrayUserDefinedSubType, INT32* arraySubType, INT32* sizeParamIndex, INT32* sizeConst, STRINGREF* marshalType, STRINGREF* marshalCookie, INT32* iidParamIndex) { FCALL_CONTRACT; HELPER_METHOD_FRAME_BEGIN_0(); { NativeTypeParamInfo info; ZeroMemory(&info, sizeof(NativeTypeParamInfo)); if (!ParseNativeTypeInfo(&info, pvNativeType, cbNativeType)) { ThrowMetaDataImportException(E_FAIL); } *unmanagedType = info.m_NativeType; *sizeParamIndex = info.m_CountParamIdx; *sizeConst = info.m_Additive; *arraySubType = info.m_ArrayElementType; #ifdef FEATURE_COMINTEROP *iidParamIndex = info.m_IidParamIndex; *safeArraySubType = info.m_SafeArrayElementVT; *safeArrayUserDefinedSubType = info.m_strSafeArrayUserDefTypeName == NULL ? NULL : StringObject::NewString(info.m_strSafeArrayUserDefTypeName, info.m_cSafeArrayUserDefTypeNameBytes); #else *iidParamIndex = 0; *safeArraySubType = VT_EMPTY; *safeArrayUserDefinedSubType = NULL; #endif *marshalType = info.m_strCMMarshalerTypeName == NULL ? NULL : StringObject::NewString(info.m_strCMMarshalerTypeName, info.m_cCMMarshalerTypeNameBytes); *marshalCookie = info.m_strCMCookie == NULL ? NULL : StringObject::NewString(info.m_strCMCookie, info.m_cCMCookieStrBytes); } HELPER_METHOD_FRAME_END(); } FCIMPLEND MDImpl4(Object *, MetaDataImport::GetDefaultValue, mdToken tk, INT64* pDefaultValue, INT32* pLength, INT32* pCorElementType) { FCALL_CONTRACT; HRESULT hr = S_OK; Object *pRetVal = NULL; IMDInternalImport *_pScope = pScope; MDDefaultValue value; IfFailGo(_pScope->GetDefaultValue(tk, &value)); // We treat string values differently. That's because on big-endian architectures we can't return a // pointer to static string data in the metadata, we have to buffer the string in order to byte-swap // all the unicode characters. MDDefaultValue therefore has a destructor on big-endian machines which // reclaims this buffer, implying we can't safely return the embedded pointer to managed code. // The easiest thing for us to do is to construct the managed string object here, in the context of // the still valid MDDefaultValue. We can't return a managed object via the normal out parameter // because it won't be GC protected, so in this special case null the output parameter and return // the string via the protected return result (which is null for all other cases). 
if (value.m_bType == ELEMENT_TYPE_STRING) { HELPER_METHOD_FRAME_BEGIN_RET_0(); *pDefaultValue = 0; STRINGREF refRetval = StringObject::NewString(value.m_wzValue, value.m_cbSize / sizeof(WCHAR)); pRetVal = STRINGREFToObject(refRetval); HELPER_METHOD_FRAME_END(); } else { *pDefaultValue = value.m_ullValue; } *pCorElementType = (UINT32)value.m_bType; *pLength = (INT32)value.m_cbSize; ErrExit: if (FAILED(hr)) { FCThrow(kBadImageFormatException); } return pRetVal; } FCIMPLEND MDImpl3(void, MetaDataImport::GetCustomAttributeProps, mdCustomAttribute cv, mdToken* ptkType, ConstArray* ppBlob) { FCALL_CONTRACT; HRESULT hr = S_OK; IMDInternalImport *_pScope = pScope; IfFailGo(_pScope->GetCustomAttributeProps(cv, ptkType)); IfFailGo(_pScope->GetCustomAttributeAsBlob(cv, (const void **)&ppBlob->m_array, (ULONG *)&ppBlob->m_count)); ErrExit: if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND static int * EnsureResultSize(MetadataEnumResult * pResult, ULONG length) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; int * p; if (length >= ARRAY_SIZE(pResult->smallResult) || DbgRandomOnExe(.01)) { pResult->largeResult = (I4Array *)OBJECTREFToObject(AllocatePrimitiveArray(ELEMENT_TYPE_I4, length)); p = pResult->largeResult->GetDirectPointerToNonObjectElements(); } else { ZeroMemory(pResult->smallResult, sizeof(pResult->smallResult)); pResult->largeResult = NULL; p = pResult->smallResult; } pResult->length = length; return p; } MDImpl3(void, MetaDataImport::Enum, mdToken type, mdToken tkParent, MetadataEnumResult * pResult) { CONTRACTL { FCALL_CHECK; PRECONDITION(pResult != NULL); } CONTRACTL_END; HELPER_METHOD_FRAME_BEGIN_0(); { IMDInternalImport *_pScope = pScope; if (type == mdtTypeDef) { ULONG nestedClassesCount; IfFailThrow(_pScope->GetCountNestedClasses(tkParent, &nestedClassesCount)); mdTypeDef* arToken = (mdTypeDef*)EnsureResultSize(pResult, nestedClassesCount); IfFailThrow(_pScope->GetNestedClasses(tkParent, arToken, nestedClassesCount, &nestedClassesCount)); } else if (type == mdtMethodDef && (TypeFromToken(tkParent) == mdtProperty || TypeFromToken(tkParent) == mdtEvent)) { HENUMInternalHolder hEnum(pScope); hEnum.EnumAssociateInit(tkParent); ULONG associatesCount = hEnum.EnumGetCount(); static_assert_no_msg(sizeof(ASSOCIATE_RECORD) == 2 * sizeof(int)); ASSOCIATE_RECORD* arAssocRecord = (ASSOCIATE_RECORD*)EnsureResultSize(pResult, 2 * associatesCount); IfFailThrow(_pScope->GetAllAssociates(&hEnum, arAssocRecord, associatesCount)); } else { HENUMInternalHolder hEnum(pScope); hEnum.EnumInit(type, tkParent); ULONG count = hEnum.EnumGetCount(); mdToken* arToken = (mdToken*)EnsureResultSize(pResult, count); for(COUNT_T i = 0; i < count && _pScope->EnumNext(&hEnum, &arToken[i]); i++); } } HELPER_METHOD_FRAME_END(); } FCIMPLEND #if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #endif MDImpl1(FC_BOOL_RET, MetaDataImport::IsValidToken, mdToken tk) { FCALL_CONTRACT; IMDInternalImport *_pScope = pScope; FC_RETURN_BOOL(_pScope->IsValidToken(tk)); } FCIMPLEND MDImpl3(void, MetaDataImport::GetClassLayout, mdTypeDef td, DWORD* pdwPackSize, ULONG* pulClassSize) { FCALL_CONTRACT; HRESULT hr = S_OK; { IMDInternalImport *_pScope = pScope; if (pdwPackSize != NULL) { hr = _pScope->GetClassPackSize(td, (ULONG *)pdwPackSize); if (hr == CLDB_E_RECORD_NOTFOUND) { *pdwPackSize = 0; hr = S_OK; } IfFailGo(hr); } if (pulClassSize != NULL) { hr = _pScope->GetClassTotalSize(td, pulClassSize); if (hr == 
CLDB_E_RECORD_NOTFOUND) { *pulClassSize = 0; hr = S_OK; } IfFailGo(hr); } } ErrExit: if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl3(FC_BOOL_RET, MetaDataImport::GetFieldOffset, mdTypeDef td, mdFieldDef target, DWORD* pdwFieldOffset) { FCALL_CONTRACT; HRESULT hr = S_OK; IMDInternalImport *_pScope = pScope; MD_CLASS_LAYOUT layout; BOOL retVal = FALSE; IfFailGo(_pScope->GetClassLayoutInit(td, &layout)); ULONG cFieldOffset; cFieldOffset = layout.m_ridFieldEnd - layout.m_ridFieldCur; for (COUNT_T i = 0; i < cFieldOffset; i ++) { mdFieldDef fd; ULONG offset; IfFailGo(_pScope->GetClassLayoutNext(&layout, &fd, &offset)); if (fd == target) { *pdwFieldOffset = offset; retVal = TRUE; break; } } ErrExit: if (FAILED(hr)) { FCThrow(kBadImageFormatException); } FC_RETURN_BOOL(retVal); } FCIMPLEND MDImpl3(void, MetaDataImport::GetUserString, mdToken tk, LPCSTR* pszName, ULONG* pCount) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; BOOL bHasExtendedChars; hr = _pScope->GetUserString(tk, pCount, &bHasExtendedChars, (LPCWSTR *)pszName); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetName, mdToken tk, LPCSTR* pszName) { FCALL_CONTRACT; HRESULT hr = S_OK; IMDInternalImport *_pScope = pScope; if (TypeFromToken(tk) == mdtMethodDef) { hr = _pScope->GetNameOfMethodDef(tk, pszName); } else if (TypeFromToken(tk) == mdtParamDef) { USHORT seq; DWORD attr; hr = _pScope->GetParamDefProps(tk, &seq, &attr, pszName); } else if (TypeFromToken(tk) == mdtFieldDef) { hr = _pScope->GetNameOfFieldDef(tk, pszName); } else if (TypeFromToken(tk) == mdtProperty) { hr = _pScope->GetPropertyProps(tk, pszName, NULL, NULL, NULL); } else if (TypeFromToken(tk) == mdtEvent) { hr = _pScope->GetEventProps(tk, pszName, NULL, NULL); } else if (TypeFromToken(tk) == mdtModule) { hr = _pScope->GetModuleRefProps(tk, pszName); } else if (TypeFromToken(tk) == mdtTypeDef) { LPCSTR szNamespace = NULL; hr = _pScope->GetNameOfTypeDef(tk, pszName, &szNamespace); } else { hr = E_FAIL; } if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetNamespace, mdToken tk, LPCSTR* pszName) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; LPCSTR szName = NULL; hr = _pScope->GetNameOfTypeDef(tk, &szName, pszName); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetGenericParamProps, mdToken tk, DWORD* pAttributes) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetGenericParamProps(tk, NULL, pAttributes, NULL, NULL, NULL); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl3(void, MetaDataImport::GetEventProps, mdToken tk, LPCSTR* pszName, INT32 *pdwEventFlags) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetEventProps(tk, pszName, (DWORD*)pdwEventFlags, NULL); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl4(void, MetaDataImport::GetPinvokeMap, mdToken tk, DWORD* pMappingFlags, LPCSTR* pszImportName, LPCSTR* pszImportDll) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; mdModule tkModule; hr = _pScope->GetPinvokeMap(tk, pMappingFlags, pszImportName, &tkModule); if (FAILED(hr)) { *pMappingFlags = 0; *pszImportName = NULL; *pszImportDll = NULL; hr = S_OK; } else { hr = _pScope->GetModuleRefProps(tkModule, pszImportDll); } if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND 
MDImpl3(void, MetaDataImport::GetParamDefProps, mdToken tk, INT32* pSequence, INT32* pAttributes) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; USHORT usSequence = 0; // Is this a valid token? if (_pScope->IsValidToken((mdParamDef)tk)) { LPCSTR szParamName; hr = _pScope->GetParamDefProps(tk, &usSequence, (DWORD *)pAttributes, &szParamName); } else { // Invalid token - throw an exception hr = COR_E_BADIMAGEFORMAT; } *pSequence = (INT32) usSequence; if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetFieldDefProps, mdToken tk, INT32 *pdwFieldFlags) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetFieldDefProps(tk, (DWORD *)pdwFieldFlags); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl4(void, MetaDataImport::GetPropertyProps, mdToken tk, LPCSTR* pszName, INT32 *pdwPropertyFlags, ConstArray* ppValue) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetPropertyProps(tk, pszName, (DWORD*)pdwPropertyFlags, (PCCOR_SIGNATURE*)&ppValue->m_array, (ULONG*)&ppValue->m_count); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetFieldMarshal, mdToken tk, ConstArray* ppValue) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetFieldMarshal(tk, (PCCOR_SIGNATURE *)&ppValue->m_array, (ULONG *)&ppValue->m_count); if (hr == CLDB_E_RECORD_NOTFOUND) { ppValue->m_array = NULL; ppValue->m_count = 0; hr = S_OK; } if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetSigOfMethodDef, mdToken tk, ConstArray* ppValue) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetSigOfMethodDef(tk, (ULONG*)&ppValue->m_count, (PCCOR_SIGNATURE *)&ppValue->m_array); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetSignatureFromToken, mdToken tk, ConstArray* ppValue) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetSigFromToken(tk, (ULONG*)&ppValue->m_count, (PCCOR_SIGNATURE *)&(ppValue->m_array)); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetSigOfFieldDef, mdToken tk, ConstArray* ppValue) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetSigOfFieldDef(tk, (ULONG*)&ppValue->m_count, (PCCOR_SIGNATURE *)&ppValue->m_array); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetParentToken, mdToken tk, mdToken* ptk) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; switch (TypeFromToken(tk)) { case mdtTypeDef: hr = _pScope->GetNestedClassProps(tk, ptk); if (hr == CLDB_E_RECORD_NOTFOUND) { *ptk = mdTypeDefNil; hr = S_OK; } break; case mdtGenericParam: hr = _pScope->GetGenericParamProps(tk, NULL, NULL, ptk, NULL, NULL); break; case mdtMethodDef: case mdtMethodSpec: case mdtFieldDef: case mdtParamDef: case mdtMemberRef: case mdtCustomAttribute: case mdtEvent: case mdtProperty: hr = _pScope->GetParentToken(tk, ptk); break; default: hr = COR_E_BADIMAGEFORMAT; break; } if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl1(void, MetaDataImport::GetScopeProps, GUID* pmvid) { FCALL_CONTRACT; HRESULT hr; LPCSTR szName; IMDInternalImport *_pScope = pScope; hr = _pScope->GetScopeProps(&szName, pmvid); if (FAILED(hr)) { 
FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetMemberRefProps, mdMemberRef mr, ConstArray* ppvSigBlob) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; LPCSTR szName_Ignore; hr = _pScope->GetNameAndSigOfMemberRef(mr, (PCCOR_SIGNATURE*)&ppvSigBlob->m_array, (ULONG*)&ppvSigBlob->m_count, &szName_Ignore); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND #if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("", on) // restore command line optimization defaults #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" #include "mlinfo.h" #include "managedmdimport.hpp" #include "wrappers.h" void ThrowMetaDataImportException(HRESULT hr) { WRAPPER_NO_CONTRACT; if (hr == CLDB_E_RECORD_NOTFOUND) return; MethodDescCallSite throwError(METHOD__METADATA_IMPORT__THROW_ERROR); ARG_SLOT args[] = { (ARG_SLOT)hr }; throwError.Call(args); } // // MetaDataImport // extern BOOL ParseNativeTypeInfo(NativeTypeParamInfo* pInfo, PCCOR_SIGNATURE pvNativeType, ULONG cbNativeType); FCIMPL11(void, MetaDataImport::GetMarshalAs, BYTE* pvNativeType, ULONG cbNativeType, INT32* unmanagedType, INT32* safeArraySubType, STRINGREF* safeArrayUserDefinedSubType, INT32* arraySubType, INT32* sizeParamIndex, INT32* sizeConst, STRINGREF* marshalType, STRINGREF* marshalCookie, INT32* iidParamIndex) { FCALL_CONTRACT; HELPER_METHOD_FRAME_BEGIN_0(); { NativeTypeParamInfo info; ZeroMemory(&info, sizeof(NativeTypeParamInfo)); if (!ParseNativeTypeInfo(&info, pvNativeType, cbNativeType)) { ThrowMetaDataImportException(E_FAIL); } *unmanagedType = info.m_NativeType; *sizeParamIndex = info.m_CountParamIdx; *sizeConst = info.m_Additive; *arraySubType = info.m_ArrayElementType; #ifdef FEATURE_COMINTEROP *iidParamIndex = info.m_IidParamIndex; *safeArraySubType = info.m_SafeArrayElementVT; *safeArrayUserDefinedSubType = info.m_strSafeArrayUserDefTypeName == NULL ? NULL : StringObject::NewString(info.m_strSafeArrayUserDefTypeName, info.m_cSafeArrayUserDefTypeNameBytes); #else *iidParamIndex = 0; *safeArraySubType = VT_EMPTY; *safeArrayUserDefinedSubType = NULL; #endif *marshalType = info.m_strCMMarshalerTypeName == NULL ? NULL : StringObject::NewString(info.m_strCMMarshalerTypeName, info.m_cCMMarshalerTypeNameBytes); *marshalCookie = info.m_strCMCookie == NULL ? NULL : StringObject::NewString(info.m_strCMCookie, info.m_cCMCookieStrBytes); } HELPER_METHOD_FRAME_END(); } FCIMPLEND MDImpl4(Object *, MetaDataImport::GetDefaultValue, mdToken tk, INT64* pDefaultValue, INT32* pLength, INT32* pCorElementType) { FCALL_CONTRACT; HRESULT hr = S_OK; Object *pRetVal = NULL; IMDInternalImport *_pScope = pScope; MDDefaultValue value; IfFailGo(_pScope->GetDefaultValue(tk, &value)); // We treat string values differently. That's because on big-endian architectures we can't return a // pointer to static string data in the metadata, we have to buffer the string in order to byte-swap // all the unicode characters. MDDefaultValue therefore has a destructor on big-endian machines which // reclaims this buffer, implying we can't safely return the embedded pointer to managed code. // The easiest thing for us to do is to construct the managed string object here, in the context of // the still valid MDDefaultValue. We can't return a managed object via the normal out parameter // because it won't be GC protected, so in this special case null the output parameter and return // the string via the protected return result (which is null for all other cases). 
if (value.m_bType == ELEMENT_TYPE_STRING) { HELPER_METHOD_FRAME_BEGIN_RET_0(); *pDefaultValue = 0; STRINGREF refRetval = StringObject::NewString(value.m_wzValue, value.m_cbSize / sizeof(WCHAR)); pRetVal = STRINGREFToObject(refRetval); HELPER_METHOD_FRAME_END(); } else { *pDefaultValue = value.m_ullValue; } *pCorElementType = (UINT32)value.m_bType; *pLength = (INT32)value.m_cbSize; ErrExit: if (FAILED(hr)) { FCThrow(kBadImageFormatException); } return pRetVal; } FCIMPLEND MDImpl3(void, MetaDataImport::GetCustomAttributeProps, mdCustomAttribute cv, mdToken* ptkType, ConstArray* ppBlob) { FCALL_CONTRACT; HRESULT hr = S_OK; IMDInternalImport *_pScope = pScope; IfFailGo(_pScope->GetCustomAttributeProps(cv, ptkType)); IfFailGo(_pScope->GetCustomAttributeAsBlob(cv, (const void **)&ppBlob->m_array, (ULONG *)&ppBlob->m_count)); ErrExit: if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND static int * EnsureResultSize(MetadataEnumResult * pResult, ULONG length) { CONTRACTL { THROWS; GC_TRIGGERS; MODE_COOPERATIVE; } CONTRACTL_END; int * p; if (length >= ARRAY_SIZE(pResult->smallResult) || DbgRandomOnExe(.01)) { pResult->largeResult = (I4Array *)OBJECTREFToObject(AllocatePrimitiveArray(ELEMENT_TYPE_I4, length)); p = pResult->largeResult->GetDirectPointerToNonObjectElements(); } else { ZeroMemory(pResult->smallResult, sizeof(pResult->smallResult)); pResult->largeResult = NULL; p = pResult->smallResult; } pResult->length = length; return p; } MDImpl3(void, MetaDataImport::Enum, mdToken type, mdToken tkParent, MetadataEnumResult * pResult) { CONTRACTL { FCALL_CHECK; PRECONDITION(pResult != NULL); } CONTRACTL_END; HELPER_METHOD_FRAME_BEGIN_0(); { IMDInternalImport *_pScope = pScope; if (type == mdtTypeDef) { ULONG nestedClassesCount; IfFailThrow(_pScope->GetCountNestedClasses(tkParent, &nestedClassesCount)); mdTypeDef* arToken = (mdTypeDef*)EnsureResultSize(pResult, nestedClassesCount); IfFailThrow(_pScope->GetNestedClasses(tkParent, arToken, nestedClassesCount, &nestedClassesCount)); } else if (type == mdtMethodDef && (TypeFromToken(tkParent) == mdtProperty || TypeFromToken(tkParent) == mdtEvent)) { HENUMInternalHolder hEnum(pScope); hEnum.EnumAssociateInit(tkParent); ULONG associatesCount = hEnum.EnumGetCount(); static_assert_no_msg(sizeof(ASSOCIATE_RECORD) == 2 * sizeof(int)); ASSOCIATE_RECORD* arAssocRecord = (ASSOCIATE_RECORD*)EnsureResultSize(pResult, 2 * associatesCount); IfFailThrow(_pScope->GetAllAssociates(&hEnum, arAssocRecord, associatesCount)); } else { HENUMInternalHolder hEnum(pScope); hEnum.EnumInit(type, tkParent); ULONG count = hEnum.EnumGetCount(); mdToken* arToken = (mdToken*)EnsureResultSize(pResult, count); for(COUNT_T i = 0; i < count && _pScope->EnumNext(&hEnum, &arToken[i]); i++); } } HELPER_METHOD_FRAME_END(); } FCIMPLEND #if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("y", on) // Small critical routines, don't put in EBP frame #endif MDImpl1(FC_BOOL_RET, MetaDataImport::IsValidToken, mdToken tk) { FCALL_CONTRACT; IMDInternalImport *_pScope = pScope; FC_RETURN_BOOL(_pScope->IsValidToken(tk)); } FCIMPLEND MDImpl3(void, MetaDataImport::GetClassLayout, mdTypeDef td, DWORD* pdwPackSize, ULONG* pulClassSize) { FCALL_CONTRACT; HRESULT hr = S_OK; { IMDInternalImport *_pScope = pScope; if (pdwPackSize != NULL) { hr = _pScope->GetClassPackSize(td, (ULONG *)pdwPackSize); if (hr == CLDB_E_RECORD_NOTFOUND) { *pdwPackSize = 0; hr = S_OK; } IfFailGo(hr); } if (pulClassSize != NULL) { hr = _pScope->GetClassTotalSize(td, pulClassSize); if (hr == 
CLDB_E_RECORD_NOTFOUND) { *pulClassSize = 0; hr = S_OK; } IfFailGo(hr); } } ErrExit: if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl3(FC_BOOL_RET, MetaDataImport::GetFieldOffset, mdTypeDef td, mdFieldDef target, DWORD* pdwFieldOffset) { FCALL_CONTRACT; HRESULT hr = S_OK; IMDInternalImport *_pScope = pScope; MD_CLASS_LAYOUT layout; BOOL retVal = FALSE; IfFailGo(_pScope->GetClassLayoutInit(td, &layout)); ULONG cFieldOffset; cFieldOffset = layout.m_ridFieldEnd - layout.m_ridFieldCur; for (COUNT_T i = 0; i < cFieldOffset; i ++) { mdFieldDef fd; ULONG offset; IfFailGo(_pScope->GetClassLayoutNext(&layout, &fd, &offset)); if (fd == target) { *pdwFieldOffset = offset; retVal = TRUE; break; } } ErrExit: if (FAILED(hr)) { FCThrow(kBadImageFormatException); } FC_RETURN_BOOL(retVal); } FCIMPLEND MDImpl3(void, MetaDataImport::GetUserString, mdToken tk, LPCSTR* pszName, ULONG* pCount) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; BOOL bHasExtendedChars; hr = _pScope->GetUserString(tk, pCount, &bHasExtendedChars, (LPCWSTR *)pszName); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetName, mdToken tk, LPCSTR* pszName) { FCALL_CONTRACT; HRESULT hr = S_OK; IMDInternalImport *_pScope = pScope; if (TypeFromToken(tk) == mdtMethodDef) { hr = _pScope->GetNameOfMethodDef(tk, pszName); } else if (TypeFromToken(tk) == mdtParamDef) { USHORT seq; DWORD attr; hr = _pScope->GetParamDefProps(tk, &seq, &attr, pszName); } else if (TypeFromToken(tk) == mdtFieldDef) { hr = _pScope->GetNameOfFieldDef(tk, pszName); } else if (TypeFromToken(tk) == mdtProperty) { hr = _pScope->GetPropertyProps(tk, pszName, NULL, NULL, NULL); } else if (TypeFromToken(tk) == mdtEvent) { hr = _pScope->GetEventProps(tk, pszName, NULL, NULL); } else if (TypeFromToken(tk) == mdtModule) { hr = _pScope->GetModuleRefProps(tk, pszName); } else if (TypeFromToken(tk) == mdtTypeDef) { LPCSTR szNamespace = NULL; hr = _pScope->GetNameOfTypeDef(tk, pszName, &szNamespace); } else { hr = E_FAIL; } if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetNamespace, mdToken tk, LPCSTR* pszName) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; LPCSTR szName = NULL; hr = _pScope->GetNameOfTypeDef(tk, &szName, pszName); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetGenericParamProps, mdToken tk, DWORD* pAttributes) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetGenericParamProps(tk, NULL, pAttributes, NULL, NULL, NULL); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl3(void, MetaDataImport::GetEventProps, mdToken tk, LPCSTR* pszName, INT32 *pdwEventFlags) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetEventProps(tk, pszName, (DWORD*)pdwEventFlags, NULL); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl4(void, MetaDataImport::GetPinvokeMap, mdToken tk, DWORD* pMappingFlags, LPCSTR* pszImportName, LPCSTR* pszImportDll) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; mdModule tkModule; hr = _pScope->GetPinvokeMap(tk, pMappingFlags, pszImportName, &tkModule); if (FAILED(hr)) { *pMappingFlags = 0; *pszImportName = NULL; *pszImportDll = NULL; hr = S_OK; } else { hr = _pScope->GetModuleRefProps(tkModule, pszImportDll); } if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND 
MDImpl3(void, MetaDataImport::GetParamDefProps, mdToken tk, INT32* pSequence, INT32* pAttributes) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; USHORT usSequence = 0; // Is this a valid token? if (_pScope->IsValidToken((mdParamDef)tk)) { LPCSTR szParamName; hr = _pScope->GetParamDefProps(tk, &usSequence, (DWORD *)pAttributes, &szParamName); } else { // Invalid token - throw an exception hr = COR_E_BADIMAGEFORMAT; } *pSequence = (INT32) usSequence; if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetFieldDefProps, mdToken tk, INT32 *pdwFieldFlags) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetFieldDefProps(tk, (DWORD *)pdwFieldFlags); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl4(void, MetaDataImport::GetPropertyProps, mdToken tk, LPCSTR* pszName, INT32 *pdwPropertyFlags, ConstArray* ppValue) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetPropertyProps(tk, pszName, (DWORD*)pdwPropertyFlags, (PCCOR_SIGNATURE*)&ppValue->m_array, (ULONG*)&ppValue->m_count); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetFieldMarshal, mdToken tk, ConstArray* ppValue) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetFieldMarshal(tk, (PCCOR_SIGNATURE *)&ppValue->m_array, (ULONG *)&ppValue->m_count); if (hr == CLDB_E_RECORD_NOTFOUND) { ppValue->m_array = NULL; ppValue->m_count = 0; hr = S_OK; } if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetSigOfMethodDef, mdToken tk, ConstArray* ppValue) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetSigOfMethodDef(tk, (ULONG*)&ppValue->m_count, (PCCOR_SIGNATURE *)&ppValue->m_array); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetSignatureFromToken, mdToken tk, ConstArray* ppValue) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetSigFromToken(tk, (ULONG*)&ppValue->m_count, (PCCOR_SIGNATURE *)&(ppValue->m_array)); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetSigOfFieldDef, mdToken tk, ConstArray* ppValue) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; hr = _pScope->GetSigOfFieldDef(tk, (ULONG*)&ppValue->m_count, (PCCOR_SIGNATURE *)&ppValue->m_array); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetParentToken, mdToken tk, mdToken* ptk) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; switch (TypeFromToken(tk)) { case mdtTypeDef: hr = _pScope->GetNestedClassProps(tk, ptk); if (hr == CLDB_E_RECORD_NOTFOUND) { *ptk = mdTypeDefNil; hr = S_OK; } break; case mdtGenericParam: hr = _pScope->GetGenericParamProps(tk, NULL, NULL, ptk, NULL, NULL); break; case mdtMethodDef: case mdtMethodSpec: case mdtFieldDef: case mdtParamDef: case mdtMemberRef: case mdtCustomAttribute: case mdtEvent: case mdtProperty: hr = _pScope->GetParentToken(tk, ptk); break; default: hr = COR_E_BADIMAGEFORMAT; break; } if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl1(void, MetaDataImport::GetScopeProps, GUID* pmvid) { FCALL_CONTRACT; HRESULT hr; LPCSTR szName; IMDInternalImport *_pScope = pScope; hr = _pScope->GetScopeProps(&szName, pmvid); if (FAILED(hr)) { 
FCThrowVoid(kBadImageFormatException); } } FCIMPLEND MDImpl2(void, MetaDataImport::GetMemberRefProps, mdMemberRef mr, ConstArray* ppvSigBlob) { FCALL_CONTRACT; HRESULT hr; IMDInternalImport *_pScope = pScope; LPCSTR szName_Ignore; hr = _pScope->GetNameAndSigOfMemberRef(mr, (PCCOR_SIGNATURE*)&ppvSigBlob->m_array, (ULONG*)&ppvSigBlob->m_count, &szName_Ignore); if (FAILED(hr)) { FCThrowVoid(kBadImageFormatException); } } FCIMPLEND #if defined(_MSC_VER) && defined(TARGET_X86) #pragma optimize("", on) // restore command line optimization defaults #endif
-1
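EnsureResultSize in the file above picks between a small inline buffer and a heap-backed (managed) array depending on the requested length. The standalone sketch below mirrors that small-buffer optimization with plain C++ types; it is an analogy, not the runtime's code, and the buffer size is arbitrary.

```cpp
// Analogy to EnsureResultSize: reuse a fixed inline buffer for small results and
// fall back to a heap allocation for large ones. Purely illustrative.
#include <cstring>
#include <iterator>
#include <vector>

struct EnumResult
{
    int              smallResult[16];   // inline storage for the common case
    std::vector<int> largeResult;       // used only when the inline buffer is too small
    size_t           length = 0;
};

static int* EnsureCapacity(EnumResult& result, size_t length)
{
    int* p;
    if (length > std::size(result.smallResult))
    {
        result.largeResult.assign(length, 0);   // heap path for large enumerations
        p = result.largeResult.data();
    }
    else
    {
        std::memset(result.smallResult, 0, sizeof(result.smallResult));
        result.largeResult.clear();
        p = result.smallResult;
    }
    result.length = length;
    return p;
}
```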
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/debug/daccess/s390x/primitives.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include "stdafx.h" #include "../../shared/s390x/primitives.cpp"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // #include "stdafx.h" #include "../../shared/s390x/primitives.cpp"
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/jit/opcode.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX opcodes.h XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _OPCODE_H_ #define _OPCODE_H_ #include "openum.h" extern const signed char opcodeSizes[]; #if defined(DEBUG) extern const char* const opcodeNames[]; extern const BYTE opcodeArgKinds[]; #endif /*****************************************************************************/ #endif // _OPCODE_H_ /*****************************************************************************/
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX opcodes.h XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /*****************************************************************************/ #ifndef _OPCODE_H_ #define _OPCODE_H_ #include "openum.h" extern const signed char opcodeSizes[]; #if defined(DEBUG) extern const char* const opcodeNames[]; extern const BYTE opcodeArgKinds[]; #endif /*****************************************************************************/ #endif // _OPCODE_H_ /*****************************************************************************/
-1
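opcode.h above exposes a per-opcode size table (`opcodeSizes`) plus debug-only name and arg-kind tables. The toy sketch below shows the kind of table-driven scan such data enables; the opcodes and operand sizes here are invented for illustration and are not CIL's real encoding.

```cpp
// Toy, self-contained example of a table-driven bytecode walk.
// The opcodes and operand sizes are invented; they only illustrate how a
// "size per opcode" table (like opcodeSizes) lets a scanner skip operands.
#include <cstdint>
#include <cstdio>
#include <iterator>
#include <vector>

// Invented instruction set: 0 = NOP, 1 = PUSH_I4 (4-byte operand), 2 = JUMP (1-byte offset).
static const signed char kOperandSize[] = { 0, 4, 1 };

static void WalkBytecode(const std::vector<uint8_t>& code)
{
    for (size_t ip = 0; ip < code.size(); )
    {
        uint8_t op = code[ip++];
        if (op >= std::size(kOperandSize))
            break;                                         // unknown opcode: stop scanning
        std::printf("opcode %u, %d operand byte(s)\n", op, kOperandSize[op]);
        ip += static_cast<size_t>(kOperandSize[op]);       // skip the operand bytes
    }
}

int main()
{
    WalkBytecode({ 1, 0x2A, 0x00, 0x00, 0x00, 2, 0x03, 0 });   // PUSH_I4 42; JUMP +3; NOP
    return 0;
}
```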
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/pal/tests/palsuite/c_runtime/strncat/test1/test1.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test1.c ** ** Purpose: ** Concatenate a few strings together, setting different lengths to be ** used for each one. Check to ensure the pointers which are returned are ** correct, and that the final string is what was expected. ** ** **==========================================================================*/ #include <palsuite.h> PALTEST(c_runtime_strncat_test1_paltest_strncat_test1, "c_runtime/strncat/test1/paltest_strncat_test1") { char dest[80]; char *test = "foo barbaz"; char *str1 = "foo "; char *str2 = "bar "; char *str3 = "baz"; char *ptr; int i; if (PAL_Initialize(argc, argv)) { return FAIL; } dest[0] = 0; for (i=1; i<80; i++) { dest[i] = 'x'; } ptr = strncat(dest, str1, strlen(str1)); if (ptr != dest) { Fail("ERROR: Expected strncat to return ptr to %p, got %p", dest, ptr); } ptr = strncat(dest, str2, 3); if (ptr != dest) { Fail("ERROR: Expected strncat to return ptr to %p, got %p", dest, ptr); } if (dest[7] != 0) { Fail("ERROR: strncat did not place a terminating NULL!"); } ptr = strncat(dest, str3, 20); if (ptr != dest) { Fail("ERROR: Expected strncat to return ptr to %p, got %p", dest, ptr); } if (strcmp(dest, test) != 0) { Fail("ERROR: Expected strncat to give \"%s\", got \"%s\"\n", test, dest); } if (dest[strlen(test)+1] != 'x') { Fail("strncat went out of bounds!\n"); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================================ ** ** Source: test1.c ** ** Purpose: ** Concatenate a few strings together, setting different lengths to be ** used for each one. Check to ensure the pointers which are returned are ** correct, and that the final string is what was expected. ** ** **==========================================================================*/ #include <palsuite.h> PALTEST(c_runtime_strncat_test1_paltest_strncat_test1, "c_runtime/strncat/test1/paltest_strncat_test1") { char dest[80]; char *test = "foo barbaz"; char *str1 = "foo "; char *str2 = "bar "; char *str3 = "baz"; char *ptr; int i; if (PAL_Initialize(argc, argv)) { return FAIL; } dest[0] = 0; for (i=1; i<80; i++) { dest[i] = 'x'; } ptr = strncat(dest, str1, strlen(str1)); if (ptr != dest) { Fail("ERROR: Expected strncat to return ptr to %p, got %p", dest, ptr); } ptr = strncat(dest, str2, 3); if (ptr != dest) { Fail("ERROR: Expected strncat to return ptr to %p, got %p", dest, ptr); } if (dest[7] != 0) { Fail("ERROR: strncat did not place a terminating NULL!"); } ptr = strncat(dest, str3, 20); if (ptr != dest) { Fail("ERROR: Expected strncat to return ptr to %p, got %p", dest, ptr); } if (strcmp(dest, test) != 0) { Fail("ERROR: Expected strncat to give \"%s\", got \"%s\"\n", test, dest); } if (dest[strlen(test)+1] != 'x') { Fail("strncat went out of bounds!\n"); } PAL_Terminate(); return PASS; }
-1
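The PAL test above pins down the strncat contract it depends on: at most n characters are appended and the result is always NUL-terminated, so the destination must have room for the appended bytes plus the terminator. A minimal standalone check of the same behavior:

```cpp
// Standalone demonstration of the strncat behavior the PAL test exercises:
// strncat(dest, src, n) appends at most n characters and then writes a '\0'.
#include <cassert>
#include <cstring>

int main()
{
    char dest[16] = "foo ";          // room for appended bytes plus the terminator

    std::strncat(dest, "bar ", 3);   // copies only "bar" (3 chars), then NUL-terminates
    assert(std::strcmp(dest, "foo bar") == 0);

    std::strncat(dest, "baz", 20);   // n larger than the source just copies all of "baz"
    assert(std::strcmp(dest, "foo barbaz") == 0);

    return 0;
}
```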
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/native/libs/System.Security.Cryptography.Native/apibridge.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Functions based on OpenSSL 1.1 API, used when building against/running with OpenSSL 1.0 #pragma once #include "pal_types.h" int local_BIO_up_ref(BIO *a); const BIGNUM* local_DSA_get0_key(const DSA* dsa, const BIGNUM** pubKey, const BIGNUM** privKey); void local_DSA_get0_pqg(const DSA* dsa, const BIGNUM** p, const BIGNUM** q, const BIGNUM** g); const DSA_METHOD* local_DSA_get_method(const DSA* dsa); int32_t local_DSA_set0_key(DSA* dsa, BIGNUM* bnY, BIGNUM* bnX); int32_t local_DSA_set0_pqg(DSA* dsa, BIGNUM* bnP, BIGNUM* bnQ, BIGNUM* bnG); void local_EVP_CIPHER_CTX_free(EVP_CIPHER_CTX* ctx); EVP_CIPHER_CTX* local_EVP_CIPHER_CTX_new(void); int32_t local_EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX* ctx); int local_EVP_PKEY_check(EVP_PKEY_CTX* ctx); RSA* local_EVP_PKEY_get0_RSA(EVP_PKEY* pkey); int local_EVP_PKEY_public_check(EVP_PKEY_CTX* ctx); int32_t local_EVP_PKEY_up_ref(EVP_PKEY* pkey); void local_HMAC_CTX_free(HMAC_CTX* ctx); HMAC_CTX* local_HMAC_CTX_new(void); long local_OpenSSL_version_num(void); void local_RSA_get0_crt_params(const RSA* rsa, const BIGNUM** dmp1, const BIGNUM** dmq1, const BIGNUM** iqmp); void local_RSA_get0_factors(const RSA* rsa, const BIGNUM** p, const BIGNUM** q); void local_RSA_get0_key(const RSA* rsa, const BIGNUM** n, const BIGNUM** e, const BIGNUM** d); int32_t local_RSA_meth_get_flags(const RSA_METHOD* meth); int32_t local_RSA_set0_crt_params(RSA* rsa, BIGNUM* dmp1, BIGNUM* dmq1, BIGNUM* iqmp); int32_t local_RSA_set0_factors(RSA* rsa, BIGNUM* p, BIGNUM* q); int32_t local_RSA_set0_key(RSA* rsa, BIGNUM* n, BIGNUM* e, BIGNUM* d); int local_RSA_test_flags(const RSA *r, int flags); int32_t local_RSA_pkey_ctx_ctrl(EVP_PKEY_CTX* ctx, int32_t optype, int32_t cmd, int32_t p1, void* p2); int32_t local_SSL_is_init_finished(const SSL* ssl); int32_t local_SSL_CTX_config(SSL_CTX* ctx, const char* name); unsigned long local_SSL_CTX_set_options(SSL_CTX* ctx, unsigned long options); unsigned long local_SSL_set_options(SSL* ssl, unsigned long options); void local_SSL_CTX_set_security_level(SSL_CTX* ctx, int32_t level); void local_SSL_set_security_level(SSL* ssl, int32_t level); int local_SSL_session_reused(SSL* ssl); int32_t local_X509_check_host(X509* x509, const char* name, size_t namelen, unsigned int flags, char** peername); const ASN1_TIME* local_X509_CRL_get0_nextUpdate(const X509_CRL* crl); int32_t local_X509_NAME_get0_der(X509_NAME* x509Name, const uint8_t** pder, size_t* pderlen); int32_t local_X509_PUBKEY_get0_param( ASN1_OBJECT** palgOid, const uint8_t** pkeyBytes, int* pkeyBytesLen, X509_ALGOR** palg, X509_PUBKEY* pubkey); STACK_OF(X509)* local_X509_STORE_CTX_get0_chain(X509_STORE_CTX* ctx); X509* local_X509_STORE_CTX_get0_cert(X509_STORE_CTX* ctx); X509_STORE* local_X509_STORE_CTX_get0_store(X509_STORE_CTX* ctx); STACK_OF(X509)* local_X509_STORE_CTX_get0_untrusted(X509_STORE_CTX* ctx); X509_VERIFY_PARAM* local_X509_STORE_get0_param(X509_STORE* ctx); const ASN1_TIME* local_X509_get0_notAfter(const X509* x509); const ASN1_TIME* local_X509_get0_notBefore(const X509* x509); ASN1_BIT_STRING* local_X509_get0_pubkey_bitstr(const X509* x509); int local_X509_set1_notBefore(X509* x509, const ASN1_TIME*); int local_X509_set1_notAfter(X509* x509, const ASN1_TIME*); const X509_ALGOR* local_X509_get0_tbs_sigalg(const X509* x509); X509_PUBKEY* local_X509_get_X509_PUBKEY(const X509* x509); int32_t local_X509_get_version(const X509* x509); int32_t 
local_X509_up_ref(X509* x509);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Functions based on OpenSSL 1.1 API, used when building against/running with OpenSSL 1.0 #pragma once #include "pal_types.h" int local_BIO_up_ref(BIO *a); const BIGNUM* local_DSA_get0_key(const DSA* dsa, const BIGNUM** pubKey, const BIGNUM** privKey); void local_DSA_get0_pqg(const DSA* dsa, const BIGNUM** p, const BIGNUM** q, const BIGNUM** g); const DSA_METHOD* local_DSA_get_method(const DSA* dsa); int32_t local_DSA_set0_key(DSA* dsa, BIGNUM* bnY, BIGNUM* bnX); int32_t local_DSA_set0_pqg(DSA* dsa, BIGNUM* bnP, BIGNUM* bnQ, BIGNUM* bnG); void local_EVP_CIPHER_CTX_free(EVP_CIPHER_CTX* ctx); EVP_CIPHER_CTX* local_EVP_CIPHER_CTX_new(void); int32_t local_EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX* ctx); int local_EVP_PKEY_check(EVP_PKEY_CTX* ctx); RSA* local_EVP_PKEY_get0_RSA(EVP_PKEY* pkey); int local_EVP_PKEY_public_check(EVP_PKEY_CTX* ctx); int32_t local_EVP_PKEY_up_ref(EVP_PKEY* pkey); void local_HMAC_CTX_free(HMAC_CTX* ctx); HMAC_CTX* local_HMAC_CTX_new(void); long local_OpenSSL_version_num(void); void local_RSA_get0_crt_params(const RSA* rsa, const BIGNUM** dmp1, const BIGNUM** dmq1, const BIGNUM** iqmp); void local_RSA_get0_factors(const RSA* rsa, const BIGNUM** p, const BIGNUM** q); void local_RSA_get0_key(const RSA* rsa, const BIGNUM** n, const BIGNUM** e, const BIGNUM** d); int32_t local_RSA_meth_get_flags(const RSA_METHOD* meth); int32_t local_RSA_set0_crt_params(RSA* rsa, BIGNUM* dmp1, BIGNUM* dmq1, BIGNUM* iqmp); int32_t local_RSA_set0_factors(RSA* rsa, BIGNUM* p, BIGNUM* q); int32_t local_RSA_set0_key(RSA* rsa, BIGNUM* n, BIGNUM* e, BIGNUM* d); int local_RSA_test_flags(const RSA *r, int flags); int32_t local_RSA_pkey_ctx_ctrl(EVP_PKEY_CTX* ctx, int32_t optype, int32_t cmd, int32_t p1, void* p2); int32_t local_SSL_is_init_finished(const SSL* ssl); int32_t local_SSL_CTX_config(SSL_CTX* ctx, const char* name); unsigned long local_SSL_CTX_set_options(SSL_CTX* ctx, unsigned long options); unsigned long local_SSL_set_options(SSL* ssl, unsigned long options); void local_SSL_CTX_set_security_level(SSL_CTX* ctx, int32_t level); void local_SSL_set_security_level(SSL* ssl, int32_t level); int local_SSL_session_reused(SSL* ssl); int32_t local_X509_check_host(X509* x509, const char* name, size_t namelen, unsigned int flags, char** peername); const ASN1_TIME* local_X509_CRL_get0_nextUpdate(const X509_CRL* crl); int32_t local_X509_NAME_get0_der(X509_NAME* x509Name, const uint8_t** pder, size_t* pderlen); int32_t local_X509_PUBKEY_get0_param( ASN1_OBJECT** palgOid, const uint8_t** pkeyBytes, int* pkeyBytesLen, X509_ALGOR** palg, X509_PUBKEY* pubkey); STACK_OF(X509)* local_X509_STORE_CTX_get0_chain(X509_STORE_CTX* ctx); X509* local_X509_STORE_CTX_get0_cert(X509_STORE_CTX* ctx); X509_STORE* local_X509_STORE_CTX_get0_store(X509_STORE_CTX* ctx); STACK_OF(X509)* local_X509_STORE_CTX_get0_untrusted(X509_STORE_CTX* ctx); X509_VERIFY_PARAM* local_X509_STORE_get0_param(X509_STORE* ctx); const ASN1_TIME* local_X509_get0_notAfter(const X509* x509); const ASN1_TIME* local_X509_get0_notBefore(const X509* x509); ASN1_BIT_STRING* local_X509_get0_pubkey_bitstr(const X509* x509); int local_X509_set1_notBefore(X509* x509, const ASN1_TIME*); int local_X509_set1_notAfter(X509* x509, const ASN1_TIME*); const X509_ALGOR* local_X509_get0_tbs_sigalg(const X509* x509); X509_PUBKEY* local_X509_get_X509_PUBKEY(const X509* x509); int32_t local_X509_get_version(const X509* x509); int32_t 
local_X509_up_ref(X509* x509);
-1
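The record above carries a compat header declaring `local_*` forwarders used when building against or running with OpenSSL 1.0 while targeting the 1.1 API surface. The C++-compatible sketch below is a hedged illustration of how one such forwarder, `local_BIO_up_ref`, could be written on top of the OpenSSL 1.0 reference-counting primitives; the use of `CRYPTO_add` and `CRYPTO_LOCK_BIO`, and the direct access to the `references` member, are assumptions about the 1.0 API, not the runtime's actual implementation.

// Hedged sketch only: one plausible OpenSSL 1.0 implementation of the
// local_BIO_up_ref forwarder declared in the header above. The real
// shim in the runtime may differ.
#include <openssl/bio.h>
#include <openssl/crypto.h>

int local_BIO_up_ref(BIO* a)
{
    if (a == nullptr)
        return 0;

    // In OpenSSL 1.0 the BIO reference count is a public struct member
    // and is bumped through CRYPTO_add with the per-type lock id.
    return CRYPTO_add(&a->references, 1, CRYPTO_LOCK_BIO) > 1 ? 1 : 0;
}

A caller that needs to keep a BIO alive for an extra owner would call local_BIO_up_ref(bio) once per additional reference and balance it with BIO_free, mirroring the 1.1-style BIO_up_ref contract.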
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
./src/coreclr/binder/bindertracing.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ============================================================ // // bindertracing.cpp // // // Implements helpers for binder tracing // // ============================================================ #include "common.h" #include "bindertracing.h" #include "bindresult.hpp" #include "activitytracker.h" #ifdef FEATURE_EVENT_TRACE #include "eventtracebase.h" #endif // FEATURE_EVENT_TRACE using namespace BINDER_SPACE; namespace { void FireAssemblyLoadStart(const BinderTracing::AssemblyBindOperation::BindRequest &request) { #ifdef FEATURE_EVENT_TRACE if (!EventEnabledAssemblyLoadStart()) return; GUID activityId = GUID_NULL; GUID relatedActivityId = GUID_NULL; ActivityTracker::Start(&activityId, &relatedActivityId); FireEtwAssemblyLoadStart( GetClrInstanceId(), request.AssemblyName, request.AssemblyPath, request.RequestingAssembly, request.AssemblyLoadContext, request.RequestingAssemblyLoadContext, &activityId, &relatedActivityId); #endif // FEATURE_EVENT_TRACE } void FireAssemblyLoadStop(const BinderTracing::AssemblyBindOperation::BindRequest &request, PEAssembly *resultAssembly, bool cached) { #ifdef FEATURE_EVENT_TRACE if (!EventEnabledAssemblyLoadStop()) return; GUID activityId = GUID_NULL; ActivityTracker::Stop(&activityId); SString resultName; SString resultPath; bool success = resultAssembly != nullptr; if (success) { resultPath = resultAssembly->GetPath(); resultAssembly->GetDisplayName(resultName); } FireEtwAssemblyLoadStop( GetClrInstanceId(), request.AssemblyName, request.AssemblyPath, request.RequestingAssembly, request.AssemblyLoadContext, request.RequestingAssemblyLoadContext, success, resultName, resultPath, cached, &activityId); #endif // FEATURE_EVENT_TRACE } void GetAssemblyLoadContextNameFromManagedALC(INT_PTR managedALC, /* out */ SString &alcName) { if (managedALC == GetAppDomain()->GetDefaultBinder()->GetManagedAssemblyLoadContext()) { alcName.Set(W("Default")); return; } OBJECTREF *alc = reinterpret_cast<OBJECTREF *>(managedALC); GCX_COOP(); struct _gc { STRINGREF alcName; } gc; ZeroMemory(&gc, sizeof(gc)); GCPROTECT_BEGIN(gc); PREPARE_VIRTUAL_CALLSITE(METHOD__OBJECT__TO_STRING, *alc); DECLARE_ARGHOLDER_ARRAY(args, 1); args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(*alc); CALL_MANAGED_METHOD_RETREF(gc.alcName, STRINGREF, args); gc.alcName->GetSString(alcName); GCPROTECT_END(); } void GetAssemblyLoadContextNameFromBinder(AssemblyBinder *binder, AppDomain *domain, /*out*/ SString &alcName) { _ASSERTE(binder != nullptr); if (binder->IsDefault()) { alcName.Set(W("Default")); } else { GetAssemblyLoadContextNameFromManagedALC(binder->GetManagedAssemblyLoadContext(), alcName); } } void GetAssemblyLoadContextNameFromSpec(AssemblySpec *spec, /*out*/ SString &alcName) { _ASSERTE(spec != nullptr); AppDomain *domain = spec->GetAppDomain(); AssemblyBinder* binder = spec->GetBinder(); if (binder == nullptr) binder = spec->GetBinderFromParentAssembly(domain); GetAssemblyLoadContextNameFromBinder(binder, domain, alcName); } void PopulateBindRequest(/*inout*/ BinderTracing::AssemblyBindOperation::BindRequest &request) { AssemblySpec *spec = request.AssemblySpec; _ASSERTE(spec != nullptr); if (request.AssemblyPath.IsEmpty()) request.AssemblyPath = spec->GetCodeBase(); if (spec->GetName() != nullptr) spec->GetDisplayName(ASM_DISPLAYF_VERSION | ASM_DISPLAYF_CULTURE | ASM_DISPLAYF_PUBLIC_KEY_TOKEN, request.AssemblyName); DomainAssembly *parentAssembly = 
spec->GetParentAssembly(); if (parentAssembly != nullptr) { PEAssembly *pPEAssembly = parentAssembly->GetPEAssembly(); _ASSERTE(pPEAssembly != nullptr); pPEAssembly->GetDisplayName(request.RequestingAssembly); AppDomain *domain = parentAssembly->GetAppDomain(); AssemblyBinder *binder = pPEAssembly->GetAssemblyBinder(); GetAssemblyLoadContextNameFromBinder(binder, domain, request.RequestingAssemblyLoadContext); } GetAssemblyLoadContextNameFromSpec(spec, request.AssemblyLoadContext); } const WCHAR *s_assemblyNotFoundMessage = W("Could not locate assembly"); } bool BinderTracing::IsEnabled() { #ifdef FEATURE_EVENT_TRACE // Just check for the AssemblyLoadStart event being enabled. return EventEnabledAssemblyLoadStart(); #endif // FEATURE_EVENT_TRACE return false; } namespace BinderTracing { static thread_local bool t_AssemblyLoadStartInProgress = false; AssemblyBindOperation::AssemblyBindOperation(AssemblySpec *assemblySpec, const SString& assemblyPath) : m_bindRequest { assemblySpec, SString::Empty(), assemblyPath } , m_populatedBindRequest { false } , m_checkedIgnoreBind { false } , m_ignoreBind { false } , m_resultAssembly { nullptr } , m_cached { false } { _ASSERTE(assemblySpec != nullptr); if (!BinderTracing::IsEnabled() || ShouldIgnoreBind()) return; t_AssemblyLoadStartInProgress = true; PopulateBindRequest(m_bindRequest); m_populatedBindRequest = true; FireAssemblyLoadStart(m_bindRequest); } AssemblyBindOperation::~AssemblyBindOperation() { if (BinderTracing::IsEnabled() && !ShouldIgnoreBind()) { t_AssemblyLoadStartInProgress = false; // Make sure the bind request is populated. Tracing may have been enabled mid-bind. if (!m_populatedBindRequest) PopulateBindRequest(m_bindRequest); FireAssemblyLoadStop(m_bindRequest, m_resultAssembly, m_cached); } if (m_resultAssembly != nullptr) m_resultAssembly->Release(); } void AssemblyBindOperation::SetResult(PEAssembly *assembly, bool cached) { _ASSERTE(m_resultAssembly == nullptr); m_resultAssembly = assembly; if (m_resultAssembly != nullptr) m_resultAssembly->AddRef(); m_cached = cached; } bool AssemblyBindOperation::ShouldIgnoreBind() { if (m_checkedIgnoreBind) return m_ignoreBind; // ActivityTracker or EventSource may have triggered the system satellite load, or load of System.Private.CoreLib // Don't track such bindings to avoid potential infinite recursion. m_ignoreBind = t_AssemblyLoadStartInProgress && (m_bindRequest.AssemblySpec->IsCoreLib() || m_bindRequest.AssemblySpec->IsCoreLibSatellite()); m_checkedIgnoreBind = true; return m_ignoreBind; } } namespace BinderTracing { ResolutionAttemptedOperation::ResolutionAttemptedOperation(AssemblyName *assemblyName, AssemblyBinder* binder, INT_PTR managedALC, const HRESULT& hr) : m_hr { hr } , m_stage { Stage::NotYetStarted } , m_tracingEnabled { BinderTracing::IsEnabled() } , m_assemblyNameObject { assemblyName } , m_pFoundAssembly { nullptr } { _ASSERTE(binder != nullptr || managedALC != 0); if (!m_tracingEnabled) return; // When binding the main assembly (by code base instead of name), the assembly name will be null. In this special case, we just // leave the assembly name empty. 
if (m_assemblyNameObject != nullptr) m_assemblyNameObject->GetDisplayName(m_assemblyName, AssemblyName::INCLUDE_VERSION | AssemblyName::INCLUDE_PUBLIC_KEY_TOKEN); if (managedALC != 0) { GetAssemblyLoadContextNameFromManagedALC(managedALC, m_assemblyLoadContextName); } else { GetAssemblyLoadContextNameFromBinder(binder, GetAppDomain(), m_assemblyLoadContextName); } } // This function simply traces out the two stages represented by the bind result. // It does not change the stage/assembly of the ResolutionAttemptedOperation class instance. void ResolutionAttemptedOperation::TraceBindResult(const BindResult &bindResult, bool mvidMismatch) { if (!m_tracingEnabled) return; // Use the error message that would be reported in the file load exception StackSString errorMsg; if (mvidMismatch) errorMsg.LoadResource(CCompRC::Error, IDS_HOST_ASSEMBLY_RESOLVER_ASSEMBLY_ALREADY_LOADED_IN_CONTEXT); const BindResult::AttemptResult *inContextAttempt = bindResult.GetAttempt(true /*foundInContext*/); const BindResult::AttemptResult *appAssembliesAttempt = bindResult.GetAttempt(false /*foundInContext*/); if (inContextAttempt != nullptr) { // If there the attempt HR represents a success, but the tracked HR represents a failure (e.g. from further validation), report the failed HR bool isLastAttempt = appAssembliesAttempt == nullptr; TraceStage(Stage::FindInLoadContext, isLastAttempt && FAILED(m_hr) && SUCCEEDED(inContextAttempt->HResult) ? m_hr : inContextAttempt->HResult, inContextAttempt->Assembly, mvidMismatch && isLastAttempt ? errorMsg.GetUnicode() : nullptr); } if (appAssembliesAttempt != nullptr) TraceStage(Stage::ApplicationAssemblies, FAILED(m_hr) && SUCCEEDED(appAssembliesAttempt->HResult) ? m_hr : appAssembliesAttempt->HResult, appAssembliesAttempt->Assembly, mvidMismatch ? 
errorMsg.GetUnicode() : nullptr); } void ResolutionAttemptedOperation::TraceStage(Stage stage, HRESULT hr, BINDER_SPACE::Assembly *resultAssembly, const WCHAR *customError) { if (!m_tracingEnabled || stage == Stage::NotYetStarted) return; PathString resultAssemblyName; StackSString resultAssemblyPath; if (resultAssembly != nullptr) { resultAssembly->GetAssemblyName()->GetDisplayName(resultAssemblyName, AssemblyName::INCLUDE_VERSION | AssemblyName::INCLUDE_PUBLIC_KEY_TOKEN); resultAssemblyPath = resultAssembly->GetPEImage()->GetPath(); } Result result; StackSString errorMsg; if (customError != nullptr) { errorMsg.Set(customError); result = Result::Failure; } else if (!m_exceptionMessage.IsEmpty()) { errorMsg = m_exceptionMessage; result = Result::Exception; } else { switch (hr) { case S_FALSE: case HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND): static_assert(HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND) == COR_E_FILENOTFOUND, "COR_E_FILENOTFOUND has sane value"); result = Result::AssemblyNotFound; errorMsg.Set(s_assemblyNotFoundMessage); break; case FUSION_E_APP_DOMAIN_LOCKED: result = Result::IncompatibleVersion; { errorMsg.Set(W("Requested version")); if (m_assemblyNameObject != nullptr) { const auto &reqVersion = m_assemblyNameObject->GetVersion(); errorMsg.AppendPrintf(W(" %d.%d.%d.%d"), reqVersion->GetMajor(), reqVersion->GetMinor(), reqVersion->GetBuild(), reqVersion->GetRevision()); } errorMsg.Append(W(" is incompatible with found version")); if (resultAssembly != nullptr) { const auto &foundVersion = resultAssembly->GetAssemblyName()->GetVersion(); errorMsg.AppendPrintf(W(" %d.%d.%d.%d"), foundVersion->GetMajor(), foundVersion->GetMinor(), foundVersion->GetBuild(), foundVersion->GetRevision()); } } break; case FUSION_E_REF_DEF_MISMATCH: result = Result::MismatchedAssemblyName; errorMsg.Printf(W("Requested assembly name '%s' does not match found assembly name"), m_assemblyName.GetUnicode()); if (resultAssembly != nullptr) errorMsg.AppendPrintf(W(" '%s'"), resultAssemblyName.GetUnicode()); break; default: if (SUCCEEDED(hr)) { result = Result::Success; _ASSERTE(resultAssembly != nullptr); // Leave errorMsg empty in this case. 
} else { result = Result::Failure; errorMsg.Printf(W("Resolution failed with HRESULT (%08x)"), m_hr); } } } FireEtwResolutionAttempted( GetClrInstanceId(), m_assemblyName, static_cast<uint16_t>(stage), m_assemblyLoadContextName, static_cast<uint16_t>(result), resultAssemblyName, resultAssemblyPath, errorMsg); } // static void ResolutionAttemptedOperation::TraceAppDomainAssemblyResolve(AssemblySpec *spec, PEAssembly *resultAssembly, Exception *exception) { if (!BinderTracing::IsEnabled()) return; Result result; StackSString errorMessage; StackSString resultAssemblyName; StackSString resultAssemblyPath; if (exception != nullptr) { exception->GetMessage(errorMessage); result = Result::Exception; } else if (resultAssembly != nullptr) { result = Result::Success; resultAssemblyPath = resultAssembly->GetPath(); resultAssembly->GetDisplayName(resultAssemblyName); } else { result = Result::AssemblyNotFound; errorMessage.Set(s_assemblyNotFoundMessage); } StackSString assemblyName; spec->GetDisplayName(ASM_DISPLAYF_VERSION | ASM_DISPLAYF_CULTURE | ASM_DISPLAYF_PUBLIC_KEY_TOKEN, assemblyName); StackSString alcName; GetAssemblyLoadContextNameFromSpec(spec, alcName); FireEtwResolutionAttempted( GetClrInstanceId(), assemblyName, static_cast<uint16_t>(Stage::AppDomainAssemblyResolveEvent), alcName, static_cast<uint16_t>(result), resultAssemblyName, resultAssemblyPath, errorMessage); } } void BinderTracing::PathProbed(const WCHAR *path, BinderTracing::PathSource source, HRESULT hr) { FireEtwKnownPathProbed(GetClrInstanceId(), path, source, hr); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ============================================================ // // bindertracing.cpp // // // Implements helpers for binder tracing // // ============================================================ #include "common.h" #include "bindertracing.h" #include "bindresult.hpp" #include "activitytracker.h" #ifdef FEATURE_EVENT_TRACE #include "eventtracebase.h" #endif // FEATURE_EVENT_TRACE using namespace BINDER_SPACE; namespace { void FireAssemblyLoadStart(const BinderTracing::AssemblyBindOperation::BindRequest &request) { #ifdef FEATURE_EVENT_TRACE if (!EventEnabledAssemblyLoadStart()) return; GUID activityId = GUID_NULL; GUID relatedActivityId = GUID_NULL; ActivityTracker::Start(&activityId, &relatedActivityId); FireEtwAssemblyLoadStart( GetClrInstanceId(), request.AssemblyName, request.AssemblyPath, request.RequestingAssembly, request.AssemblyLoadContext, request.RequestingAssemblyLoadContext, &activityId, &relatedActivityId); #endif // FEATURE_EVENT_TRACE } void FireAssemblyLoadStop(const BinderTracing::AssemblyBindOperation::BindRequest &request, PEAssembly *resultAssembly, bool cached) { #ifdef FEATURE_EVENT_TRACE if (!EventEnabledAssemblyLoadStop()) return; GUID activityId = GUID_NULL; ActivityTracker::Stop(&activityId); SString resultName; SString resultPath; bool success = resultAssembly != nullptr; if (success) { resultPath = resultAssembly->GetPath(); resultAssembly->GetDisplayName(resultName); } FireEtwAssemblyLoadStop( GetClrInstanceId(), request.AssemblyName, request.AssemblyPath, request.RequestingAssembly, request.AssemblyLoadContext, request.RequestingAssemblyLoadContext, success, resultName, resultPath, cached, &activityId); #endif // FEATURE_EVENT_TRACE } void GetAssemblyLoadContextNameFromManagedALC(INT_PTR managedALC, /* out */ SString &alcName) { if (managedALC == GetAppDomain()->GetDefaultBinder()->GetManagedAssemblyLoadContext()) { alcName.Set(W("Default")); return; } OBJECTREF *alc = reinterpret_cast<OBJECTREF *>(managedALC); GCX_COOP(); struct _gc { STRINGREF alcName; } gc; ZeroMemory(&gc, sizeof(gc)); GCPROTECT_BEGIN(gc); PREPARE_VIRTUAL_CALLSITE(METHOD__OBJECT__TO_STRING, *alc); DECLARE_ARGHOLDER_ARRAY(args, 1); args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(*alc); CALL_MANAGED_METHOD_RETREF(gc.alcName, STRINGREF, args); gc.alcName->GetSString(alcName); GCPROTECT_END(); } void GetAssemblyLoadContextNameFromBinder(AssemblyBinder *binder, AppDomain *domain, /*out*/ SString &alcName) { _ASSERTE(binder != nullptr); if (binder->IsDefault()) { alcName.Set(W("Default")); } else { GetAssemblyLoadContextNameFromManagedALC(binder->GetManagedAssemblyLoadContext(), alcName); } } void GetAssemblyLoadContextNameFromSpec(AssemblySpec *spec, /*out*/ SString &alcName) { _ASSERTE(spec != nullptr); AppDomain *domain = spec->GetAppDomain(); AssemblyBinder* binder = spec->GetBinder(); if (binder == nullptr) binder = spec->GetBinderFromParentAssembly(domain); GetAssemblyLoadContextNameFromBinder(binder, domain, alcName); } void PopulateBindRequest(/*inout*/ BinderTracing::AssemblyBindOperation::BindRequest &request) { AssemblySpec *spec = request.AssemblySpec; _ASSERTE(spec != nullptr); if (request.AssemblyPath.IsEmpty()) request.AssemblyPath = spec->GetCodeBase(); if (spec->GetName() != nullptr) spec->GetDisplayName(ASM_DISPLAYF_VERSION | ASM_DISPLAYF_CULTURE | ASM_DISPLAYF_PUBLIC_KEY_TOKEN, request.AssemblyName); DomainAssembly *parentAssembly = 
spec->GetParentAssembly(); if (parentAssembly != nullptr) { PEAssembly *pPEAssembly = parentAssembly->GetPEAssembly(); _ASSERTE(pPEAssembly != nullptr); pPEAssembly->GetDisplayName(request.RequestingAssembly); AppDomain *domain = parentAssembly->GetAppDomain(); AssemblyBinder *binder = pPEAssembly->GetAssemblyBinder(); GetAssemblyLoadContextNameFromBinder(binder, domain, request.RequestingAssemblyLoadContext); } GetAssemblyLoadContextNameFromSpec(spec, request.AssemblyLoadContext); } const WCHAR *s_assemblyNotFoundMessage = W("Could not locate assembly"); } bool BinderTracing::IsEnabled() { #ifdef FEATURE_EVENT_TRACE // Just check for the AssemblyLoadStart event being enabled. return EventEnabledAssemblyLoadStart(); #endif // FEATURE_EVENT_TRACE return false; } namespace BinderTracing { static thread_local bool t_AssemblyLoadStartInProgress = false; AssemblyBindOperation::AssemblyBindOperation(AssemblySpec *assemblySpec, const SString& assemblyPath) : m_bindRequest { assemblySpec, SString::Empty(), assemblyPath } , m_populatedBindRequest { false } , m_checkedIgnoreBind { false } , m_ignoreBind { false } , m_resultAssembly { nullptr } , m_cached { false } { _ASSERTE(assemblySpec != nullptr); if (!BinderTracing::IsEnabled() || ShouldIgnoreBind()) return; t_AssemblyLoadStartInProgress = true; PopulateBindRequest(m_bindRequest); m_populatedBindRequest = true; FireAssemblyLoadStart(m_bindRequest); } AssemblyBindOperation::~AssemblyBindOperation() { if (BinderTracing::IsEnabled() && !ShouldIgnoreBind()) { t_AssemblyLoadStartInProgress = false; // Make sure the bind request is populated. Tracing may have been enabled mid-bind. if (!m_populatedBindRequest) PopulateBindRequest(m_bindRequest); FireAssemblyLoadStop(m_bindRequest, m_resultAssembly, m_cached); } if (m_resultAssembly != nullptr) m_resultAssembly->Release(); } void AssemblyBindOperation::SetResult(PEAssembly *assembly, bool cached) { _ASSERTE(m_resultAssembly == nullptr); m_resultAssembly = assembly; if (m_resultAssembly != nullptr) m_resultAssembly->AddRef(); m_cached = cached; } bool AssemblyBindOperation::ShouldIgnoreBind() { if (m_checkedIgnoreBind) return m_ignoreBind; // ActivityTracker or EventSource may have triggered the system satellite load, or load of System.Private.CoreLib // Don't track such bindings to avoid potential infinite recursion. m_ignoreBind = t_AssemblyLoadStartInProgress && (m_bindRequest.AssemblySpec->IsCoreLib() || m_bindRequest.AssemblySpec->IsCoreLibSatellite()); m_checkedIgnoreBind = true; return m_ignoreBind; } } namespace BinderTracing { ResolutionAttemptedOperation::ResolutionAttemptedOperation(AssemblyName *assemblyName, AssemblyBinder* binder, INT_PTR managedALC, const HRESULT& hr) : m_hr { hr } , m_stage { Stage::NotYetStarted } , m_tracingEnabled { BinderTracing::IsEnabled() } , m_assemblyNameObject { assemblyName } , m_pFoundAssembly { nullptr } { _ASSERTE(binder != nullptr || managedALC != 0); if (!m_tracingEnabled) return; // When binding the main assembly (by code base instead of name), the assembly name will be null. In this special case, we just // leave the assembly name empty. 
if (m_assemblyNameObject != nullptr) m_assemblyNameObject->GetDisplayName(m_assemblyName, AssemblyName::INCLUDE_VERSION | AssemblyName::INCLUDE_PUBLIC_KEY_TOKEN); if (managedALC != 0) { GetAssemblyLoadContextNameFromManagedALC(managedALC, m_assemblyLoadContextName); } else { GetAssemblyLoadContextNameFromBinder(binder, GetAppDomain(), m_assemblyLoadContextName); } } // This function simply traces out the two stages represented by the bind result. // It does not change the stage/assembly of the ResolutionAttemptedOperation class instance. void ResolutionAttemptedOperation::TraceBindResult(const BindResult &bindResult, bool mvidMismatch) { if (!m_tracingEnabled) return; // Use the error message that would be reported in the file load exception StackSString errorMsg; if (mvidMismatch) errorMsg.LoadResource(CCompRC::Error, IDS_HOST_ASSEMBLY_RESOLVER_ASSEMBLY_ALREADY_LOADED_IN_CONTEXT); const BindResult::AttemptResult *inContextAttempt = bindResult.GetAttempt(true /*foundInContext*/); const BindResult::AttemptResult *appAssembliesAttempt = bindResult.GetAttempt(false /*foundInContext*/); if (inContextAttempt != nullptr) { // If there the attempt HR represents a success, but the tracked HR represents a failure (e.g. from further validation), report the failed HR bool isLastAttempt = appAssembliesAttempt == nullptr; TraceStage(Stage::FindInLoadContext, isLastAttempt && FAILED(m_hr) && SUCCEEDED(inContextAttempt->HResult) ? m_hr : inContextAttempt->HResult, inContextAttempt->Assembly, mvidMismatch && isLastAttempt ? errorMsg.GetUnicode() : nullptr); } if (appAssembliesAttempt != nullptr) TraceStage(Stage::ApplicationAssemblies, FAILED(m_hr) && SUCCEEDED(appAssembliesAttempt->HResult) ? m_hr : appAssembliesAttempt->HResult, appAssembliesAttempt->Assembly, mvidMismatch ? 
errorMsg.GetUnicode() : nullptr); } void ResolutionAttemptedOperation::TraceStage(Stage stage, HRESULT hr, BINDER_SPACE::Assembly *resultAssembly, const WCHAR *customError) { if (!m_tracingEnabled || stage == Stage::NotYetStarted) return; PathString resultAssemblyName; StackSString resultAssemblyPath; if (resultAssembly != nullptr) { resultAssembly->GetAssemblyName()->GetDisplayName(resultAssemblyName, AssemblyName::INCLUDE_VERSION | AssemblyName::INCLUDE_PUBLIC_KEY_TOKEN); resultAssemblyPath = resultAssembly->GetPEImage()->GetPath(); } Result result; StackSString errorMsg; if (customError != nullptr) { errorMsg.Set(customError); result = Result::Failure; } else if (!m_exceptionMessage.IsEmpty()) { errorMsg = m_exceptionMessage; result = Result::Exception; } else { switch (hr) { case S_FALSE: case HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND): static_assert(HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND) == COR_E_FILENOTFOUND, "COR_E_FILENOTFOUND has sane value"); result = Result::AssemblyNotFound; errorMsg.Set(s_assemblyNotFoundMessage); break; case FUSION_E_APP_DOMAIN_LOCKED: result = Result::IncompatibleVersion; { errorMsg.Set(W("Requested version")); if (m_assemblyNameObject != nullptr) { const auto &reqVersion = m_assemblyNameObject->GetVersion(); errorMsg.AppendPrintf(W(" %d.%d.%d.%d"), reqVersion->GetMajor(), reqVersion->GetMinor(), reqVersion->GetBuild(), reqVersion->GetRevision()); } errorMsg.Append(W(" is incompatible with found version")); if (resultAssembly != nullptr) { const auto &foundVersion = resultAssembly->GetAssemblyName()->GetVersion(); errorMsg.AppendPrintf(W(" %d.%d.%d.%d"), foundVersion->GetMajor(), foundVersion->GetMinor(), foundVersion->GetBuild(), foundVersion->GetRevision()); } } break; case FUSION_E_REF_DEF_MISMATCH: result = Result::MismatchedAssemblyName; errorMsg.Printf(W("Requested assembly name '%s' does not match found assembly name"), m_assemblyName.GetUnicode()); if (resultAssembly != nullptr) errorMsg.AppendPrintf(W(" '%s'"), resultAssemblyName.GetUnicode()); break; default: if (SUCCEEDED(hr)) { result = Result::Success; _ASSERTE(resultAssembly != nullptr); // Leave errorMsg empty in this case. 
} else { result = Result::Failure; errorMsg.Printf(W("Resolution failed with HRESULT (%08x)"), m_hr); } } } FireEtwResolutionAttempted( GetClrInstanceId(), m_assemblyName, static_cast<uint16_t>(stage), m_assemblyLoadContextName, static_cast<uint16_t>(result), resultAssemblyName, resultAssemblyPath, errorMsg); } // static void ResolutionAttemptedOperation::TraceAppDomainAssemblyResolve(AssemblySpec *spec, PEAssembly *resultAssembly, Exception *exception) { if (!BinderTracing::IsEnabled()) return; Result result; StackSString errorMessage; StackSString resultAssemblyName; StackSString resultAssemblyPath; if (exception != nullptr) { exception->GetMessage(errorMessage); result = Result::Exception; } else if (resultAssembly != nullptr) { result = Result::Success; resultAssemblyPath = resultAssembly->GetPath(); resultAssembly->GetDisplayName(resultAssemblyName); } else { result = Result::AssemblyNotFound; errorMessage.Set(s_assemblyNotFoundMessage); } StackSString assemblyName; spec->GetDisplayName(ASM_DISPLAYF_VERSION | ASM_DISPLAYF_CULTURE | ASM_DISPLAYF_PUBLIC_KEY_TOKEN, assemblyName); StackSString alcName; GetAssemblyLoadContextNameFromSpec(spec, alcName); FireEtwResolutionAttempted( GetClrInstanceId(), assemblyName, static_cast<uint16_t>(Stage::AppDomainAssemblyResolveEvent), alcName, static_cast<uint16_t>(result), resultAssemblyName, resultAssemblyPath, errorMessage); } } void BinderTracing::PathProbed(const WCHAR *path, BinderTracing::PathSource source, HRESULT hr) { FireEtwKnownPathProbed(GetClrInstanceId(), path, source, hr); }
-1
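The PR description repeated in this record explains an adaptive policy for where OSR patchpoints are placed: at backedge sources, at backedge targets, or adaptively based on the number of backedges, with a fallback to targets when source placement is not possible. The C++ snippet below is a hedged, self-contained sketch of such a selection policy, not the JIT's actual code; the enum, the threshold, and every identifier are invented for illustration.

// Hedged illustration only: a simplified stand-in for the adaptive
// patchpoint-placement choice described in the PR text above. All
// names (PatchpointStrategy, kAdaptiveBackedgeThreshold, ...) are
// hypothetical.
#include <cstddef>

enum class PatchpointStrategy
{
    BackedgeTargets, // place patchpoints at loop heads (backedge targets)
    BackedgeSources, // place patchpoints at the blocks that own the backedges
    Adaptive         // choose between the two based on the loop shape
};

// Invented threshold: with few backedges (typical C# 'for' loops),
// prefer source placement; with many, target placement is simpler.
constexpr std::size_t kAdaptiveBackedgeThreshold = 4;

inline PatchpointStrategy ResolveAdaptive(std::size_t backedgeCount, bool sourcesPlaceable)
{
    // If patchpoints cannot be placed at sources for some reason, fall
    // back to targets, mirroring the fallback the PR text describes.
    if (!sourcesPlaceable || backedgeCount > kAdaptiveBackedgeThreshold)
        return PatchpointStrategy::BackedgeTargets;

    return PatchpointStrategy::BackedgeSources;
}

The sketch treats the choice as a single decision driven by a backedge count plus a feasibility flag, which is all the PR text commits to; any finer-grained behavior belongs to the real JIT sources.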
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so, we fall back to placing them at targets.
./src/coreclr/jit/lower.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Lower XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #ifndef _LOWER_H_ #define _LOWER_H_ #include "compiler.h" #include "phase.h" #include "lsra.h" #include "sideeffects.h" class Lowering final : public Phase { public: inline Lowering(Compiler* compiler, LinearScanInterface* lsra) : Phase(compiler, PHASE_LOWERING), vtableCallTemp(BAD_VAR_NUM) { m_lsra = (LinearScan*)lsra; assert(m_lsra); } virtual PhaseStatus DoPhase() override; // This variant of LowerRange is called from outside of the main Lowering pass, // so it creates its own instance of Lowering to do so. void LowerRange(BasicBlock* block, LIR::ReadOnlyRange& range) { Lowering lowerer(comp, m_lsra); lowerer.m_block = block; lowerer.LowerRange(range); } private: // LowerRange handles new code that is introduced by or after Lowering. void LowerRange(LIR::ReadOnlyRange& range) { for (GenTree* newNode : range) { LowerNode(newNode); } } void LowerRange(GenTree* firstNode, GenTree* lastNode) { LIR::ReadOnlyRange range(firstNode, lastNode); LowerRange(range); } // ContainCheckRange handles new code that is introduced by or after Lowering, // and that is known to be already in Lowered form. void ContainCheckRange(LIR::ReadOnlyRange& range) { for (GenTree* newNode : range) { ContainCheckNode(newNode); } } void ContainCheckRange(GenTree* firstNode, GenTree* lastNode) { LIR::ReadOnlyRange range(firstNode, lastNode); ContainCheckRange(range); } void InsertTreeBeforeAndContainCheck(GenTree* insertionPoint, GenTree* tree) { LIR::Range range = LIR::SeqTree(comp, tree); ContainCheckRange(range); BlockRange().InsertBefore(insertionPoint, std::move(range)); } void ContainCheckNode(GenTree* node); void ContainCheckDivOrMod(GenTreeOp* node); void ContainCheckReturnTrap(GenTreeOp* node); void ContainCheckArrOffset(GenTreeArrOffs* node); void ContainCheckLclHeap(GenTreeOp* node); void ContainCheckRet(GenTreeUnOp* ret); void ContainCheckJTrue(GenTreeOp* node); void ContainCheckBitCast(GenTree* node); void ContainCheckCallOperands(GenTreeCall* call); void ContainCheckIndir(GenTreeIndir* indirNode); void ContainCheckStoreIndir(GenTreeStoreInd* indirNode); void ContainCheckMul(GenTreeOp* node); void ContainCheckShiftRotate(GenTreeOp* node); void ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const; void ContainCheckCast(GenTreeCast* node); void ContainCheckCompare(GenTreeOp* node); void ContainCheckBinary(GenTreeOp* node); void ContainCheckBoundsChk(GenTreeBoundsChk* node); #ifdef TARGET_XARCH void ContainCheckFloatBinary(GenTreeOp* node); void ContainCheckIntrinsic(GenTreeOp* node); #endif // TARGET_XARCH #ifdef FEATURE_SIMD void ContainCheckSIMD(GenTreeSIMD* simdNode); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS void ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* addr); void ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node); #endif // FEATURE_HW_INTRINSICS #ifdef DEBUG static void CheckCallArg(GenTree* arg); static void CheckCall(GenTreeCall* call); static void CheckNode(Compiler* compiler, GenTree* node); static bool 
CheckBlock(Compiler* compiler, BasicBlock* block); #endif // DEBUG void LowerBlock(BasicBlock* block); GenTree* LowerNode(GenTree* node); bool IsInvariantInRange(GenTree* node, GenTree* endExclusive); // ------------------------------ // Call Lowering // ------------------------------ void LowerCall(GenTree* call); void LowerCFGCall(GenTreeCall* call); void MoveCFGCallArg(GenTreeCall* call, GenTree* node); #ifndef TARGET_64BIT GenTree* DecomposeLongCompare(GenTree* cmp); #endif GenTree* OptimizeConstCompare(GenTree* cmp); GenTree* LowerCompare(GenTree* cmp); GenTree* LowerJTrue(GenTreeOp* jtrue); GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition); void LowerJmpMethod(GenTree* jmp); void LowerRet(GenTreeUnOp* ret); void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar); void LowerRetStruct(GenTreeUnOp* ret); void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret); void LowerCallStruct(GenTreeCall* call); void LowerStoreSingleRegCallStruct(GenTreeBlk* store); #if !defined(WINDOWS_AMD64_ABI) GenTreeLclVar* SpillStructCallResult(GenTreeCall* call) const; #endif // WINDOWS_AMD64_ABI GenTree* LowerDelegateInvoke(GenTreeCall* call); GenTree* LowerIndirectNonvirtCall(GenTreeCall* call); GenTree* LowerDirectCall(GenTreeCall* call); GenTree* LowerNonvirtPinvokeCall(GenTreeCall* call); GenTree* LowerTailCallViaJitHelper(GenTreeCall* callNode, GenTree* callTarget); void LowerFastTailCall(GenTreeCall* callNode); void RehomeArgForFastTailCall(unsigned int lclNum, GenTree* insertTempBefore, GenTree* lookForUsesStart, GenTreeCall* callNode); void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint); GenTree* LowerVirtualVtableCall(GenTreeCall* call); GenTree* LowerVirtualStubCall(GenTreeCall* call); void LowerArgsForCall(GenTreeCall* call); void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode); GenTree* NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type); void LowerArg(GenTreeCall* call, GenTree** ppTree); #ifdef TARGET_ARMARCH GenTree* LowerFloatArg(GenTree** pArg, fgArgTabEntry* info); GenTree* LowerFloatArgReg(GenTree* arg, regNumber regNum); #endif void InsertPInvokeCallProlog(GenTreeCall* call); void InsertPInvokeCallEpilog(GenTreeCall* call); void InsertPInvokeMethodProlog(); void InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr)); GenTree* SetGCState(int cns); GenTree* CreateReturnTrapSeq(); enum FrameLinkAction { PushFrame, PopFrame }; GenTree* CreateFrameLinkUpdate(FrameLinkAction); GenTree* AddrGen(ssize_t addr); GenTree* AddrGen(void* addr); GenTree* Ind(GenTree* tree, var_types type = TYP_I_IMPL) { return comp->gtNewOperNode(GT_IND, type, tree); } GenTree* PhysReg(regNumber reg, var_types type = TYP_I_IMPL) { return comp->gtNewPhysRegNode(reg, type); } GenTree* ThisReg(GenTreeCall* call) { return PhysReg(comp->codeGen->genGetThisArgReg(call), TYP_REF); } GenTree* Offset(GenTree* base, unsigned offset) { var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet(); return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, nullptr, 0, offset); } GenTree* OffsetByIndex(GenTree* base, GenTree* index) { var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet(); return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, 0, 0); } GenTree* OffsetByIndexWithScale(GenTree* base, GenTree* index, unsigned scale) { var_types resultType = (base->TypeGet() == TYP_REF) ? 
TYP_BYREF : base->TypeGet(); return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, scale, 0); } // Replace the definition of the given use with a lclVar, allocating a new temp // if 'tempNum' is BAD_VAR_NUM. Returns the LclVar node. GenTreeLclVar* ReplaceWithLclVar(LIR::Use& use, unsigned tempNum = BAD_VAR_NUM) { GenTree* oldUseNode = use.Def(); if ((oldUseNode->gtOper != GT_LCL_VAR) || (tempNum != BAD_VAR_NUM)) { GenTree* assign; use.ReplaceWithLclVar(comp, tempNum, &assign); GenTree* newUseNode = use.Def(); ContainCheckRange(oldUseNode->gtNext, newUseNode); // We need to lower the LclVar and assignment since there may be certain // types or scenarios, such as TYP_SIMD12, that need special handling LowerNode(assign); LowerNode(newUseNode); return newUseNode->AsLclVar(); } return oldUseNode->AsLclVar(); } // return true if this call target is within range of a pc-rel call on the machine bool IsCallTargetInRange(void* addr); #if defined(TARGET_XARCH) GenTree* PreferredRegOptionalOperand(GenTree* tree); // ------------------------------------------------------------------ // SetRegOptionalBinOp - Indicates which of the operands of a bin-op // register requirement is optional. Xarch instruction set allows // either of op1 or op2 of binary operation (e.g. add, mul etc) to be // a memory operand. This routine provides info to register allocator // which of its operands optionally require a register. Lsra might not // allocate a register to RefTypeUse positions of such operands if it // is beneficial. In such a case codegen will treat them as memory // operands. // // Arguments: // tree - Gentree of a binary operation. // isSafeToMarkOp1 True if it's safe to mark op1 as register optional // isSafeToMarkOp2 True if it's safe to mark op2 as register optional // // Returns // The caller is expected to get isSafeToMarkOp1 and isSafeToMarkOp2 // by calling IsSafeToContainMem. // // Note: On xarch at most only one of the operands will be marked as // reg optional, even when both operands could be considered register // optional. void SetRegOptionalForBinOp(GenTree* tree, bool isSafeToMarkOp1, bool isSafeToMarkOp2) { assert(GenTree::OperIsBinary(tree->OperGet())); GenTree* const op1 = tree->gtGetOp1(); GenTree* const op2 = tree->gtGetOp2(); const unsigned operatorSize = genTypeSize(tree->TypeGet()); const bool op1Legal = isSafeToMarkOp1 && tree->OperIsCommutative() && (operatorSize == genTypeSize(op1->TypeGet())); const bool op2Legal = isSafeToMarkOp2 && (operatorSize == genTypeSize(op2->TypeGet())); GenTree* regOptionalOperand = nullptr; if (op1Legal) { regOptionalOperand = op2Legal ? 
PreferredRegOptionalOperand(tree) : op1; } else if (op2Legal) { regOptionalOperand = op2; } if (regOptionalOperand != nullptr) { regOptionalOperand->SetRegOptional(); } } #endif // defined(TARGET_XARCH) // Per tree node member functions void LowerStoreIndirCommon(GenTreeStoreInd* ind); void LowerIndir(GenTreeIndir* ind); void LowerStoreIndir(GenTreeStoreInd* node); GenTree* LowerAdd(GenTreeOp* node); GenTree* LowerMul(GenTreeOp* mul); GenTree* LowerBinaryArithmetic(GenTreeOp* binOp); bool LowerUnsignedDivOrMod(GenTreeOp* divMod); GenTree* LowerConstIntDivOrMod(GenTree* node); GenTree* LowerSignedDivOrMod(GenTree* node); void LowerBlockStore(GenTreeBlk* blkNode); void LowerBlockStoreCommon(GenTreeBlk* blkNode); void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr); void LowerPutArgStk(GenTreePutArgStk* tree); bool TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* parent); bool TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode); GenTree* LowerSwitch(GenTree* node); bool TryLowerSwitchToBitTest( BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue); void LowerCast(GenTree* node); #if !CPU_LOAD_STORE_ARCH bool IsRMWIndirCandidate(GenTree* operand, GenTree* storeInd); bool IsBinOpInRMWStoreInd(GenTree* tree); bool IsRMWMemOpRootedAtStoreInd(GenTree* storeIndTree, GenTree** indirCandidate, GenTree** indirOpSource); bool LowerRMWMemOp(GenTreeIndir* storeInd); #endif void WidenSIMD12IfNecessary(GenTreeLclVarCommon* node); bool CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc); void LowerStoreLoc(GenTreeLclVarCommon* tree); GenTree* LowerArrElem(GenTree* node); void LowerRotate(GenTree* tree); void LowerShift(GenTreeOp* shift); #ifdef FEATURE_SIMD void LowerSIMD(GenTreeSIMD* simdNode); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS void LowerHWIntrinsic(GenTreeHWIntrinsic* node); void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition); void LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp); void LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node); void LowerHWIntrinsicDot(GenTreeHWIntrinsic* node); #if defined(TARGET_XARCH) void LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node); void LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node); void LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node); void LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node); GenTree* TryLowerAndOpToResetLowestSetBit(GenTreeOp* andNode); GenTree* TryLowerAndOpToAndNot(GenTreeOp* andNode); #elif defined(TARGET_ARM64) bool IsValidConstForMovImm(GenTreeHWIntrinsic* node); void LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node); #endif // !TARGET_XARCH && !TARGET_ARM64 union VectorConstant { int8_t i8[32]; uint8_t u8[32]; int16_t i16[16]; uint16_t u16[16]; int32_t i32[8]; uint32_t u32[8]; int64_t i64[4]; uint64_t u64[4]; float f32[8]; double f64[4]; }; //---------------------------------------------------------------------------------------------- // VectorConstantIsBroadcastedI64: Check N i64 elements in a constant vector for equality // // Arguments: // vecCns - Constant vector // count - Amount of i64 components to compare // // Returns: // true if N i64 elements of the given vector are equal static bool VectorConstantIsBroadcastedI64(VectorConstant& vecCns, int count) { assert(count >= 1 && count <= 4); for (int i = 1; i < count; i++) { if (vecCns.i64[i] != vecCns.i64[0]) { return false; } } return true; } 
//---------------------------------------------------------------------------------------------- // ProcessArgForHWIntrinsicCreate: Processes an argument for the Lowering::LowerHWIntrinsicCreate method // // Arguments: // arg - The argument to process // argIdx - The index of the argument being processed // vecCns - The vector constant being constructed // baseType - The base type of the vector constant // // Returns: // true if arg was a constant; otherwise, false static bool HandleArgForHWIntrinsicCreate(GenTree* arg, int argIdx, VectorConstant& vecCns, var_types baseType) { switch (baseType) { case TYP_BYTE: case TYP_UBYTE: { if (arg->IsCnsIntOrI()) { vecCns.i8[argIdx] = static_cast<int8_t>(arg->AsIntCon()->gtIconVal); return true; } else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i8[argIdx] == 0); } break; } case TYP_SHORT: case TYP_USHORT: { if (arg->IsCnsIntOrI()) { vecCns.i16[argIdx] = static_cast<int16_t>(arg->AsIntCon()->gtIconVal); return true; } else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i16[argIdx] == 0); } break; } case TYP_INT: case TYP_UINT: { if (arg->IsCnsIntOrI()) { vecCns.i32[argIdx] = static_cast<int32_t>(arg->AsIntCon()->gtIconVal); return true; } else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i32[argIdx] == 0); } break; } case TYP_LONG: case TYP_ULONG: { #if defined(TARGET_64BIT) if (arg->IsCnsIntOrI()) { vecCns.i64[argIdx] = static_cast<int64_t>(arg->AsIntCon()->gtIconVal); return true; } #else if (arg->OperIsLong() && arg->AsOp()->gtOp1->IsCnsIntOrI() && arg->AsOp()->gtOp2->IsCnsIntOrI()) { // 32-bit targets will decompose GT_CNS_LNG into two GT_CNS_INT // We need to reconstruct the 64-bit value in order to handle this INT64 gtLconVal = arg->AsOp()->gtOp2->AsIntCon()->gtIconVal; gtLconVal <<= 32; gtLconVal |= arg->AsOp()->gtOp1->AsIntCon()->gtIconVal; vecCns.i64[argIdx] = gtLconVal; return true; } #endif // TARGET_64BIT else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i64[argIdx] == 0); } break; } case TYP_FLOAT: { if (arg->IsCnsFltOrDbl()) { vecCns.f32[argIdx] = static_cast<float>(arg->AsDblCon()->gtDconVal); return true; } else { // We expect the VectorConstant to have been already zeroed // We check against the i32, rather than f32, to account for -0.0 assert(vecCns.i32[argIdx] == 0); } break; } case TYP_DOUBLE: { if (arg->IsCnsFltOrDbl()) { vecCns.f64[argIdx] = static_cast<double>(arg->AsDblCon()->gtDconVal); return true; } else { // We expect the VectorConstant to have been already zeroed // We check against the i64, rather than f64, to account for -0.0 assert(vecCns.i64[argIdx] == 0); } break; } default: { unreached(); } } return false; } #endif // FEATURE_HW_INTRINSICS //---------------------------------------------------------------------------------------------- // TryRemoveCastIfPresent: Removes op it is a cast operation and the size of its input is at // least the size of expectedType // // Arguments: // expectedType - The expected type of the cast operation input if it is to be removed // op - The tree to remove if it is a cast op whose input is at least the size of expectedType // // Returns: // op if it was not a cast node or if its input is not at least the size of expected type; // Otherwise, it returns the underlying operation that was being casted GenTree* TryRemoveCastIfPresent(var_types expectedType, GenTree* op) { if (!op->OperIs(GT_CAST)) { return op; } GenTree* castOp = op->AsCast()->CastOp(); if 
(genTypeSize(castOp->gtType) >= genTypeSize(expectedType)) { BlockRange().Remove(op); return castOp; } return op; } // Utility functions public: static bool IndirsAreEquivalent(GenTree* pTreeA, GenTree* pTreeB); // return true if 'childNode' is an immediate that can be contained // by the 'parentNode' (i.e. folded into an instruction) // for example small enough and non-relocatable bool IsContainableImmed(GenTree* parentNode, GenTree* childNode) const; // Return true if 'node' is a containable memory op. bool IsContainableMemoryOp(GenTree* node) const { return m_lsra->isContainableMemoryOp(node); } #ifdef FEATURE_HW_INTRINSICS // Tries to get a containable node for a given HWIntrinsic bool TryGetContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, GenTree** pNode, bool* supportsRegOptional, GenTreeHWIntrinsic* transparentParentNode = nullptr); #endif // FEATURE_HW_INTRINSICS static void TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block); private: static bool NodesAreEquivalentLeaves(GenTree* candidate, GenTree* storeInd); bool AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index); // Makes 'childNode' contained in the 'parentNode' void MakeSrcContained(GenTree* parentNode, GenTree* childNode) const; // Checks and makes 'childNode' contained in the 'parentNode' bool CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode); // Checks for memory conflicts in the instructions between childNode and parentNode, and returns true if childNode // can be contained. bool IsSafeToContainMem(GenTree* parentNode, GenTree* childNode) const; // Similar to above, but allows bypassing a "transparent" parent. bool IsSafeToContainMem(GenTree* grandparentNode, GenTree* parentNode, GenTree* childNode) const; inline LIR::Range& BlockRange() const { return LIR::AsRange(m_block); } // Any tracked lclVar accessed by a LCL_FLD or STORE_LCL_FLD should be marked doNotEnregister. // This method checks, and asserts in the DEBUG case if it is not so marked, // but in the non-DEBUG case (asserts disabled) set the flag so that we don't generate bad code. // This ensures that the local's value is valid on-stack as expected for a *LCL_FLD. void verifyLclFldDoNotEnregister(unsigned lclNum) { LclVarDsc* varDsc = comp->lvaGetDesc(lclNum); // Do a couple of simple checks before setting lvDoNotEnregister. // This may not cover all cases in 'isRegCandidate()' but we don't want to // do an expensive check here. For non-candidates it is not harmful to set lvDoNotEnregister. if (varDsc->lvTracked && !varDsc->lvDoNotEnregister) { assert(!m_lsra->isRegCandidate(varDsc)); comp->lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } } LinearScan* m_lsra; unsigned vtableCallTemp; // local variable we use as a temp for vtable calls mutable SideEffectSet m_scratchSideEffects; // SideEffectSet used for IsSafeToContainMem and isRMWIndirCandidate BasicBlock* m_block; }; #endif // _LOWER_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Lower XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #ifndef _LOWER_H_ #define _LOWER_H_ #include "compiler.h" #include "phase.h" #include "lsra.h" #include "sideeffects.h" class Lowering final : public Phase { public: inline Lowering(Compiler* compiler, LinearScanInterface* lsra) : Phase(compiler, PHASE_LOWERING), vtableCallTemp(BAD_VAR_NUM) { m_lsra = (LinearScan*)lsra; assert(m_lsra); } virtual PhaseStatus DoPhase() override; // This variant of LowerRange is called from outside of the main Lowering pass, // so it creates its own instance of Lowering to do so. void LowerRange(BasicBlock* block, LIR::ReadOnlyRange& range) { Lowering lowerer(comp, m_lsra); lowerer.m_block = block; lowerer.LowerRange(range); } private: // LowerRange handles new code that is introduced by or after Lowering. void LowerRange(LIR::ReadOnlyRange& range) { for (GenTree* newNode : range) { LowerNode(newNode); } } void LowerRange(GenTree* firstNode, GenTree* lastNode) { LIR::ReadOnlyRange range(firstNode, lastNode); LowerRange(range); } // ContainCheckRange handles new code that is introduced by or after Lowering, // and that is known to be already in Lowered form. void ContainCheckRange(LIR::ReadOnlyRange& range) { for (GenTree* newNode : range) { ContainCheckNode(newNode); } } void ContainCheckRange(GenTree* firstNode, GenTree* lastNode) { LIR::ReadOnlyRange range(firstNode, lastNode); ContainCheckRange(range); } void InsertTreeBeforeAndContainCheck(GenTree* insertionPoint, GenTree* tree) { LIR::Range range = LIR::SeqTree(comp, tree); ContainCheckRange(range); BlockRange().InsertBefore(insertionPoint, std::move(range)); } void ContainCheckNode(GenTree* node); void ContainCheckDivOrMod(GenTreeOp* node); void ContainCheckReturnTrap(GenTreeOp* node); void ContainCheckArrOffset(GenTreeArrOffs* node); void ContainCheckLclHeap(GenTreeOp* node); void ContainCheckRet(GenTreeUnOp* ret); void ContainCheckJTrue(GenTreeOp* node); void ContainCheckBitCast(GenTree* node); void ContainCheckCallOperands(GenTreeCall* call); void ContainCheckIndir(GenTreeIndir* indirNode); void ContainCheckStoreIndir(GenTreeStoreInd* indirNode); void ContainCheckMul(GenTreeOp* node); void ContainCheckShiftRotate(GenTreeOp* node); void ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const; void ContainCheckCast(GenTreeCast* node); void ContainCheckCompare(GenTreeOp* node); void ContainCheckBinary(GenTreeOp* node); void ContainCheckBoundsChk(GenTreeBoundsChk* node); #ifdef TARGET_XARCH void ContainCheckFloatBinary(GenTreeOp* node); void ContainCheckIntrinsic(GenTreeOp* node); #endif // TARGET_XARCH #ifdef FEATURE_SIMD void ContainCheckSIMD(GenTreeSIMD* simdNode); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS void ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* addr); void ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node); #endif // FEATURE_HW_INTRINSICS #ifdef DEBUG static void CheckCallArg(GenTree* arg); static void CheckCall(GenTreeCall* call); static void CheckNode(Compiler* compiler, GenTree* node); static bool 
CheckBlock(Compiler* compiler, BasicBlock* block); #endif // DEBUG void LowerBlock(BasicBlock* block); GenTree* LowerNode(GenTree* node); bool IsInvariantInRange(GenTree* node, GenTree* endExclusive); // ------------------------------ // Call Lowering // ------------------------------ void LowerCall(GenTree* call); void LowerCFGCall(GenTreeCall* call); void MoveCFGCallArg(GenTreeCall* call, GenTree* node); #ifndef TARGET_64BIT GenTree* DecomposeLongCompare(GenTree* cmp); #endif GenTree* OptimizeConstCompare(GenTree* cmp); GenTree* LowerCompare(GenTree* cmp); GenTree* LowerJTrue(GenTreeOp* jtrue); GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition); void LowerJmpMethod(GenTree* jmp); void LowerRet(GenTreeUnOp* ret); void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar); void LowerRetStruct(GenTreeUnOp* ret); void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret); void LowerCallStruct(GenTreeCall* call); void LowerStoreSingleRegCallStruct(GenTreeBlk* store); #if !defined(WINDOWS_AMD64_ABI) GenTreeLclVar* SpillStructCallResult(GenTreeCall* call) const; #endif // WINDOWS_AMD64_ABI GenTree* LowerDelegateInvoke(GenTreeCall* call); GenTree* LowerIndirectNonvirtCall(GenTreeCall* call); GenTree* LowerDirectCall(GenTreeCall* call); GenTree* LowerNonvirtPinvokeCall(GenTreeCall* call); GenTree* LowerTailCallViaJitHelper(GenTreeCall* callNode, GenTree* callTarget); void LowerFastTailCall(GenTreeCall* callNode); void RehomeArgForFastTailCall(unsigned int lclNum, GenTree* insertTempBefore, GenTree* lookForUsesStart, GenTreeCall* callNode); void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint); GenTree* LowerVirtualVtableCall(GenTreeCall* call); GenTree* LowerVirtualStubCall(GenTreeCall* call); void LowerArgsForCall(GenTreeCall* call); void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode); GenTree* NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type); void LowerArg(GenTreeCall* call, GenTree** ppTree); #ifdef TARGET_ARMARCH GenTree* LowerFloatArg(GenTree** pArg, fgArgTabEntry* info); GenTree* LowerFloatArgReg(GenTree* arg, regNumber regNum); #endif void InsertPInvokeCallProlog(GenTreeCall* call); void InsertPInvokeCallEpilog(GenTreeCall* call); void InsertPInvokeMethodProlog(); void InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr)); GenTree* SetGCState(int cns); GenTree* CreateReturnTrapSeq(); enum FrameLinkAction { PushFrame, PopFrame }; GenTree* CreateFrameLinkUpdate(FrameLinkAction); GenTree* AddrGen(ssize_t addr); GenTree* AddrGen(void* addr); GenTree* Ind(GenTree* tree, var_types type = TYP_I_IMPL) { return comp->gtNewOperNode(GT_IND, type, tree); } GenTree* PhysReg(regNumber reg, var_types type = TYP_I_IMPL) { return comp->gtNewPhysRegNode(reg, type); } GenTree* ThisReg(GenTreeCall* call) { return PhysReg(comp->codeGen->genGetThisArgReg(call), TYP_REF); } GenTree* Offset(GenTree* base, unsigned offset) { var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet(); return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, nullptr, 0, offset); } GenTree* OffsetByIndex(GenTree* base, GenTree* index) { var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet(); return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, 0, 0); } GenTree* OffsetByIndexWithScale(GenTree* base, GenTree* index, unsigned scale) { var_types resultType = (base->TypeGet() == TYP_REF) ? 
TYP_BYREF : base->TypeGet(); return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, scale, 0); } // Replace the definition of the given use with a lclVar, allocating a new temp // if 'tempNum' is BAD_VAR_NUM. Returns the LclVar node. GenTreeLclVar* ReplaceWithLclVar(LIR::Use& use, unsigned tempNum = BAD_VAR_NUM) { GenTree* oldUseNode = use.Def(); if ((oldUseNode->gtOper != GT_LCL_VAR) || (tempNum != BAD_VAR_NUM)) { GenTree* assign; use.ReplaceWithLclVar(comp, tempNum, &assign); GenTree* newUseNode = use.Def(); ContainCheckRange(oldUseNode->gtNext, newUseNode); // We need to lower the LclVar and assignment since there may be certain // types or scenarios, such as TYP_SIMD12, that need special handling LowerNode(assign); LowerNode(newUseNode); return newUseNode->AsLclVar(); } return oldUseNode->AsLclVar(); } // return true if this call target is within range of a pc-rel call on the machine bool IsCallTargetInRange(void* addr); #if defined(TARGET_XARCH) GenTree* PreferredRegOptionalOperand(GenTree* tree); // ------------------------------------------------------------------ // SetRegOptionalBinOp - Indicates which of the operands of a bin-op // register requirement is optional. Xarch instruction set allows // either of op1 or op2 of binary operation (e.g. add, mul etc) to be // a memory operand. This routine provides info to register allocator // which of its operands optionally require a register. Lsra might not // allocate a register to RefTypeUse positions of such operands if it // is beneficial. In such a case codegen will treat them as memory // operands. // // Arguments: // tree - Gentree of a binary operation. // isSafeToMarkOp1 True if it's safe to mark op1 as register optional // isSafeToMarkOp2 True if it's safe to mark op2 as register optional // // Returns // The caller is expected to get isSafeToMarkOp1 and isSafeToMarkOp2 // by calling IsSafeToContainMem. // // Note: On xarch at most only one of the operands will be marked as // reg optional, even when both operands could be considered register // optional. void SetRegOptionalForBinOp(GenTree* tree, bool isSafeToMarkOp1, bool isSafeToMarkOp2) { assert(GenTree::OperIsBinary(tree->OperGet())); GenTree* const op1 = tree->gtGetOp1(); GenTree* const op2 = tree->gtGetOp2(); const unsigned operatorSize = genTypeSize(tree->TypeGet()); const bool op1Legal = isSafeToMarkOp1 && tree->OperIsCommutative() && (operatorSize == genTypeSize(op1->TypeGet())); const bool op2Legal = isSafeToMarkOp2 && (operatorSize == genTypeSize(op2->TypeGet())); GenTree* regOptionalOperand = nullptr; if (op1Legal) { regOptionalOperand = op2Legal ? 
PreferredRegOptionalOperand(tree) : op1; } else if (op2Legal) { regOptionalOperand = op2; } if (regOptionalOperand != nullptr) { regOptionalOperand->SetRegOptional(); } } #endif // defined(TARGET_XARCH) // Per tree node member functions void LowerStoreIndirCommon(GenTreeStoreInd* ind); void LowerIndir(GenTreeIndir* ind); void LowerStoreIndir(GenTreeStoreInd* node); GenTree* LowerAdd(GenTreeOp* node); GenTree* LowerMul(GenTreeOp* mul); GenTree* LowerBinaryArithmetic(GenTreeOp* binOp); bool LowerUnsignedDivOrMod(GenTreeOp* divMod); GenTree* LowerConstIntDivOrMod(GenTree* node); GenTree* LowerSignedDivOrMod(GenTree* node); void LowerBlockStore(GenTreeBlk* blkNode); void LowerBlockStoreCommon(GenTreeBlk* blkNode); void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr); void LowerPutArgStk(GenTreePutArgStk* tree); bool TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* parent); bool TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode); GenTree* LowerSwitch(GenTree* node); bool TryLowerSwitchToBitTest( BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue); void LowerCast(GenTree* node); #if !CPU_LOAD_STORE_ARCH bool IsRMWIndirCandidate(GenTree* operand, GenTree* storeInd); bool IsBinOpInRMWStoreInd(GenTree* tree); bool IsRMWMemOpRootedAtStoreInd(GenTree* storeIndTree, GenTree** indirCandidate, GenTree** indirOpSource); bool LowerRMWMemOp(GenTreeIndir* storeInd); #endif void WidenSIMD12IfNecessary(GenTreeLclVarCommon* node); bool CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc); void LowerStoreLoc(GenTreeLclVarCommon* tree); GenTree* LowerArrElem(GenTree* node); void LowerRotate(GenTree* tree); void LowerShift(GenTreeOp* shift); #ifdef FEATURE_SIMD void LowerSIMD(GenTreeSIMD* simdNode); #endif // FEATURE_SIMD #ifdef FEATURE_HW_INTRINSICS void LowerHWIntrinsic(GenTreeHWIntrinsic* node); void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition); void LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp); void LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node); void LowerHWIntrinsicDot(GenTreeHWIntrinsic* node); #if defined(TARGET_XARCH) void LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node); void LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node); void LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node); void LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node); GenTree* TryLowerAndOpToResetLowestSetBit(GenTreeOp* andNode); GenTree* TryLowerAndOpToAndNot(GenTreeOp* andNode); #elif defined(TARGET_ARM64) bool IsValidConstForMovImm(GenTreeHWIntrinsic* node); void LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node); #endif // !TARGET_XARCH && !TARGET_ARM64 union VectorConstant { int8_t i8[32]; uint8_t u8[32]; int16_t i16[16]; uint16_t u16[16]; int32_t i32[8]; uint32_t u32[8]; int64_t i64[4]; uint64_t u64[4]; float f32[8]; double f64[4]; }; //---------------------------------------------------------------------------------------------- // VectorConstantIsBroadcastedI64: Check N i64 elements in a constant vector for equality // // Arguments: // vecCns - Constant vector // count - Amount of i64 components to compare // // Returns: // true if N i64 elements of the given vector are equal static bool VectorConstantIsBroadcastedI64(VectorConstant& vecCns, int count) { assert(count >= 1 && count <= 4); for (int i = 1; i < count; i++) { if (vecCns.i64[i] != vecCns.i64[0]) { return false; } } return true; } 
//---------------------------------------------------------------------------------------------- // ProcessArgForHWIntrinsicCreate: Processes an argument for the Lowering::LowerHWIntrinsicCreate method // // Arguments: // arg - The argument to process // argIdx - The index of the argument being processed // vecCns - The vector constant being constructed // baseType - The base type of the vector constant // // Returns: // true if arg was a constant; otherwise, false static bool HandleArgForHWIntrinsicCreate(GenTree* arg, int argIdx, VectorConstant& vecCns, var_types baseType) { switch (baseType) { case TYP_BYTE: case TYP_UBYTE: { if (arg->IsCnsIntOrI()) { vecCns.i8[argIdx] = static_cast<int8_t>(arg->AsIntCon()->gtIconVal); return true; } else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i8[argIdx] == 0); } break; } case TYP_SHORT: case TYP_USHORT: { if (arg->IsCnsIntOrI()) { vecCns.i16[argIdx] = static_cast<int16_t>(arg->AsIntCon()->gtIconVal); return true; } else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i16[argIdx] == 0); } break; } case TYP_INT: case TYP_UINT: { if (arg->IsCnsIntOrI()) { vecCns.i32[argIdx] = static_cast<int32_t>(arg->AsIntCon()->gtIconVal); return true; } else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i32[argIdx] == 0); } break; } case TYP_LONG: case TYP_ULONG: { #if defined(TARGET_64BIT) if (arg->IsCnsIntOrI()) { vecCns.i64[argIdx] = static_cast<int64_t>(arg->AsIntCon()->gtIconVal); return true; } #else if (arg->OperIsLong() && arg->AsOp()->gtOp1->IsCnsIntOrI() && arg->AsOp()->gtOp2->IsCnsIntOrI()) { // 32-bit targets will decompose GT_CNS_LNG into two GT_CNS_INT // We need to reconstruct the 64-bit value in order to handle this INT64 gtLconVal = arg->AsOp()->gtOp2->AsIntCon()->gtIconVal; gtLconVal <<= 32; gtLconVal |= arg->AsOp()->gtOp1->AsIntCon()->gtIconVal; vecCns.i64[argIdx] = gtLconVal; return true; } #endif // TARGET_64BIT else { // We expect the VectorConstant to have been already zeroed assert(vecCns.i64[argIdx] == 0); } break; } case TYP_FLOAT: { if (arg->IsCnsFltOrDbl()) { vecCns.f32[argIdx] = static_cast<float>(arg->AsDblCon()->gtDconVal); return true; } else { // We expect the VectorConstant to have been already zeroed // We check against the i32, rather than f32, to account for -0.0 assert(vecCns.i32[argIdx] == 0); } break; } case TYP_DOUBLE: { if (arg->IsCnsFltOrDbl()) { vecCns.f64[argIdx] = static_cast<double>(arg->AsDblCon()->gtDconVal); return true; } else { // We expect the VectorConstant to have been already zeroed // We check against the i64, rather than f64, to account for -0.0 assert(vecCns.i64[argIdx] == 0); } break; } default: { unreached(); } } return false; } #endif // FEATURE_HW_INTRINSICS //---------------------------------------------------------------------------------------------- // TryRemoveCastIfPresent: Removes op it is a cast operation and the size of its input is at // least the size of expectedType // // Arguments: // expectedType - The expected type of the cast operation input if it is to be removed // op - The tree to remove if it is a cast op whose input is at least the size of expectedType // // Returns: // op if it was not a cast node or if its input is not at least the size of expected type; // Otherwise, it returns the underlying operation that was being casted GenTree* TryRemoveCastIfPresent(var_types expectedType, GenTree* op) { if (!op->OperIs(GT_CAST)) { return op; } GenTree* castOp = op->AsCast()->CastOp(); if 
(genTypeSize(castOp->gtType) >= genTypeSize(expectedType)) { BlockRange().Remove(op); return castOp; } return op; } // Utility functions public: static bool IndirsAreEquivalent(GenTree* pTreeA, GenTree* pTreeB); // return true if 'childNode' is an immediate that can be contained // by the 'parentNode' (i.e. folded into an instruction) // for example small enough and non-relocatable bool IsContainableImmed(GenTree* parentNode, GenTree* childNode) const; // Return true if 'node' is a containable memory op. bool IsContainableMemoryOp(GenTree* node) const { return m_lsra->isContainableMemoryOp(node); } #ifdef FEATURE_HW_INTRINSICS // Tries to get a containable node for a given HWIntrinsic bool TryGetContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, GenTree** pNode, bool* supportsRegOptional, GenTreeHWIntrinsic* transparentParentNode = nullptr); #endif // FEATURE_HW_INTRINSICS static void TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block); private: static bool NodesAreEquivalentLeaves(GenTree* candidate, GenTree* storeInd); bool AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index); // Makes 'childNode' contained in the 'parentNode' void MakeSrcContained(GenTree* parentNode, GenTree* childNode) const; // Checks and makes 'childNode' contained in the 'parentNode' bool CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode); // Checks for memory conflicts in the instructions between childNode and parentNode, and returns true if childNode // can be contained. bool IsSafeToContainMem(GenTree* parentNode, GenTree* childNode) const; // Similar to above, but allows bypassing a "transparent" parent. bool IsSafeToContainMem(GenTree* grandparentNode, GenTree* parentNode, GenTree* childNode) const; inline LIR::Range& BlockRange() const { return LIR::AsRange(m_block); } // Any tracked lclVar accessed by a LCL_FLD or STORE_LCL_FLD should be marked doNotEnregister. // This method checks, and asserts in the DEBUG case if it is not so marked, // but in the non-DEBUG case (asserts disabled) set the flag so that we don't generate bad code. // This ensures that the local's value is valid on-stack as expected for a *LCL_FLD. void verifyLclFldDoNotEnregister(unsigned lclNum) { LclVarDsc* varDsc = comp->lvaGetDesc(lclNum); // Do a couple of simple checks before setting lvDoNotEnregister. // This may not cover all cases in 'isRegCandidate()' but we don't want to // do an expensive check here. For non-candidates it is not harmful to set lvDoNotEnregister. if (varDsc->lvTracked && !varDsc->lvDoNotEnregister) { assert(!m_lsra->isRegCandidate(varDsc)); comp->lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::LocalField)); } } LinearScan* m_lsra; unsigned vtableCallTemp; // local variable we use as a temp for vtable calls mutable SideEffectSet m_scratchSideEffects; // SideEffectSet used for IsSafeToContainMem and isRMWIndirCandidate BasicBlock* m_block; }; #endif // _LOWER_H_
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/pal/tests/palsuite/composite/synchronization/nativecs_interlocked/resultbuffer.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //#include "stdafx.h" #include "resultbuffer.h" ResultBuffer:: ResultBuffer(int ThreadCount, int ThreadLogSize) { // Declare an internal status variable int Status=0; // Update the maximum thread count MaxThreadCount = ThreadCount; // Allocate the memory buffer based on the passed in thread and process counts // and the specified size of the thread specific buffer buffer = NULL; buffer = (char*)malloc(ThreadCount*ThreadLogSize); // Check to see if the buffer memory was allocated if (buffer == NULL) Status = -1; // Initialize the buffer to 0 to prevent bogus data memset(buffer,0,ThreadCount*ThreadLogSize); // The ThreadOffset is equal to the total number of bytes that will be stored per thread ThreadOffset = ThreadLogSize; } int ResultBuffer::LogResult(int Thread, char* Data) { // Declare an internal status flad int status = 0; // Declare an object to store the offset address into the buffer int Offset; // Check to make sure the Thread index is not out of range if(Thread > MaxThreadCount) { printf("Thread index is out of range, Value of Thread[%d], Value of MaxThreadCount[%d]\n", Thread, MaxThreadCount); status = -1; return(status); } // Caculate the offset into the shared buffer based on the process and thread indices Offset = (Thread)*ThreadOffset; // Write the passed in data to the reserved buffer memcpy(buffer+Offset,Data,ThreadOffset); return(status); } char* ResultBuffer::getResultBuffer(int threadId) { return (buffer + threadId*ThreadOffset); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //#include "stdafx.h" #include "resultbuffer.h" ResultBuffer:: ResultBuffer(int ThreadCount, int ThreadLogSize) { // Declare an internal status variable int Status=0; // Update the maximum thread count MaxThreadCount = ThreadCount; // Allocate the memory buffer based on the passed in thread and process counts // and the specified size of the thread specific buffer buffer = NULL; buffer = (char*)malloc(ThreadCount*ThreadLogSize); // Check to see if the buffer memory was allocated if (buffer == NULL) Status = -1; // Initialize the buffer to 0 to prevent bogus data memset(buffer,0,ThreadCount*ThreadLogSize); // The ThreadOffset is equal to the total number of bytes that will be stored per thread ThreadOffset = ThreadLogSize; } int ResultBuffer::LogResult(int Thread, char* Data) { // Declare an internal status flad int status = 0; // Declare an object to store the offset address into the buffer int Offset; // Check to make sure the Thread index is not out of range if(Thread > MaxThreadCount) { printf("Thread index is out of range, Value of Thread[%d], Value of MaxThreadCount[%d]\n", Thread, MaxThreadCount); status = -1; return(status); } // Caculate the offset into the shared buffer based on the process and thread indices Offset = (Thread)*ThreadOffset; // Write the passed in data to the reserved buffer memcpy(buffer+Offset,Data,ThreadOffset); return(status); } char* ResultBuffer::getResultBuffer(int threadId) { return (buffer + threadId*ThreadOffset); }
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/native/corehost/bundle/runner.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __RUNNER_H__ #define __RUNNER_H__ #include "error_codes.h" #include "header.h" #include "manifest.h" #include "info.h" // bundle::runner extends bundle::info to supports: // * Reading the bundle manifest and identifying file locations for the runtime // * Extracting bundled files to disk when necessary // bundle::runner is used by HostPolicy. namespace bundle { class runner_t : public info_t { public: runner_t(const pal::char_t* bundle_path, const pal::char_t* app_path, int64_t header_offset) : info_t(bundle_path, app_path, header_offset) {} const pal::string_t& extraction_path() const { return m_extraction_path; } bool has_base(const pal::string_t& base) const { return base.compare(base_path()) == 0; } bool probe(const pal::string_t& relative_path, int64_t* offset, int64_t* size, int64_t* compressedSize) const; const file_entry_t* probe(const pal::string_t& relative_path) const; bool locate(const pal::string_t& relative_path, pal::string_t& full_path, bool& extracted_to_disk) const; bool locate(const pal::string_t& relative_path, pal::string_t& full_path) const { bool extracted_to_disk; return locate(relative_path, full_path, extracted_to_disk); } bool disable(const pal::string_t& relative_path); static StatusCode process_manifest_and_extract() { return mutable_app()->extract(); } static const runner_t* app() { return (const runner_t*)the_app; } static runner_t* mutable_app() { return (runner_t*)the_app; } private: StatusCode extract(); manifest_t m_manifest; pal::string_t m_extraction_path; }; } #endif // __RUNNER_H__
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef __RUNNER_H__ #define __RUNNER_H__ #include "error_codes.h" #include "header.h" #include "manifest.h" #include "info.h" // bundle::runner extends bundle::info to supports: // * Reading the bundle manifest and identifying file locations for the runtime // * Extracting bundled files to disk when necessary // bundle::runner is used by HostPolicy. namespace bundle { class runner_t : public info_t { public: runner_t(const pal::char_t* bundle_path, const pal::char_t* app_path, int64_t header_offset) : info_t(bundle_path, app_path, header_offset) {} const pal::string_t& extraction_path() const { return m_extraction_path; } bool has_base(const pal::string_t& base) const { return base.compare(base_path()) == 0; } bool probe(const pal::string_t& relative_path, int64_t* offset, int64_t* size, int64_t* compressedSize) const; const file_entry_t* probe(const pal::string_t& relative_path) const; bool locate(const pal::string_t& relative_path, pal::string_t& full_path, bool& extracted_to_disk) const; bool locate(const pal::string_t& relative_path, pal::string_t& full_path) const { bool extracted_to_disk; return locate(relative_path, full_path, extracted_to_disk); } bool disable(const pal::string_t& relative_path); static StatusCode process_manifest_and_extract() { return mutable_app()->extract(); } static const runner_t* app() { return (const runner_t*)the_app; } static runner_t* mutable_app() { return (runner_t*)the_app; } private: StatusCode extract(); manifest_t m_manifest; pal::string_t m_extraction_path; }; } #endif // __RUNNER_H__
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/pal/tests/palsuite/file_io/FlushFileBuffers/test1/FlushFileBuffers.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: FlushFileBuffers.c ** ** Purpose: Tests the PAL implementation of the FlushFileBuffers function ** This tests checks the return values of FlushFileBuffers -- once on an ** open handle and once on a closed handle. ** ** Depends: ** CreateFile ** WriteFile ** CloseHandle ** DeleteFileA ** ** **===================================================================*/ #include <palsuite.h> PALTEST(file_io_FlushFileBuffers_test1_paltest_flushfilebuffers_test1, "file_io/FlushFileBuffers/test1/paltest_flushfilebuffers_test1") { int TheReturn; HANDLE TheFileHandle; DWORD temp; DWORD originalSize=10000; DWORD finalSize=10000; const char* fileName="the_file"; /* 1 2 3 4*/ char * SomeText = "1234567890123456789012345678901234567890"; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } /* Open the file to get a HANDLE */ TheFileHandle = CreateFile( fileName, GENERIC_READ|GENERIC_WRITE, FILE_SHARE_READ, NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if(TheFileHandle == INVALID_HANDLE_VALUE) { Fail("ERROR: CreateFile failed. Test depends on this function."); } /* get the file size */ originalSize = GetFileSize (TheFileHandle, NULL) ; if(originalSize == INVALID_FILE_SIZE) { Fail("ERROR: call to GetFileSize faild with error " "The GetLastError is %d.",GetLastError()); } /* Write something too the HANDLE. Should be buffered */ TheReturn = WriteFile(TheFileHandle, SomeText, strlen(SomeText), &temp, NULL); if(TheReturn == 0) { Fail("ERROR: WriteFile failed. Test depends on this function."); } /* Test to see that FlushFileBuffers returns a success value */ TheReturn = FlushFileBuffers(TheFileHandle); if(TheReturn == 0) { Fail("ERROR: The FlushFileBuffers function returned 0, which " "indicates failure, when trying to flush a valid HANDLE. " "The GetLastError is %d.",GetLastError()); } /* test if flush modified the file */ finalSize = GetFileSize (TheFileHandle, NULL) ; if(finalSize==INVALID_FILE_SIZE) { Fail("ERROR: call to GetFileSize faild with error " "The GetLastError is %d.",GetLastError()); } if(finalSize!=(originalSize+strlen(SomeText))) { Fail("ERROR: FlushFileBuffers failed. data was not written to the file"); } /* Close the Handle */ TheReturn = CloseHandle(TheFileHandle); if(TheReturn == 0) { Fail("ERROR: CloseHandle failed. This function depends " "upon it."); } /* Test to see that FlushFileBuffers returns a failure value */ TheReturn = FlushFileBuffers(TheFileHandle); if(TheReturn != 0) { Fail("ERROR: The FlushFileBuffers function returned non-zero, " "which indicates success, when trying to flush an invalid " "HANDLE."); } /* make sure file does not exist */ if(DeleteFileA(fileName)== 0 ) { Fail("ERROR: call to DeleteFileA faild with error " "The GetLastError is %d.",GetLastError()); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: FlushFileBuffers.c ** ** Purpose: Tests the PAL implementation of the FlushFileBuffers function ** This tests checks the return values of FlushFileBuffers -- once on an ** open handle and once on a closed handle. ** ** Depends: ** CreateFile ** WriteFile ** CloseHandle ** DeleteFileA ** ** **===================================================================*/ #include <palsuite.h> PALTEST(file_io_FlushFileBuffers_test1_paltest_flushfilebuffers_test1, "file_io/FlushFileBuffers/test1/paltest_flushfilebuffers_test1") { int TheReturn; HANDLE TheFileHandle; DWORD temp; DWORD originalSize=10000; DWORD finalSize=10000; const char* fileName="the_file"; /* 1 2 3 4*/ char * SomeText = "1234567890123456789012345678901234567890"; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } /* Open the file to get a HANDLE */ TheFileHandle = CreateFile( fileName, GENERIC_READ|GENERIC_WRITE, FILE_SHARE_READ, NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); if(TheFileHandle == INVALID_HANDLE_VALUE) { Fail("ERROR: CreateFile failed. Test depends on this function."); } /* get the file size */ originalSize = GetFileSize (TheFileHandle, NULL) ; if(originalSize == INVALID_FILE_SIZE) { Fail("ERROR: call to GetFileSize faild with error " "The GetLastError is %d.",GetLastError()); } /* Write something too the HANDLE. Should be buffered */ TheReturn = WriteFile(TheFileHandle, SomeText, strlen(SomeText), &temp, NULL); if(TheReturn == 0) { Fail("ERROR: WriteFile failed. Test depends on this function."); } /* Test to see that FlushFileBuffers returns a success value */ TheReturn = FlushFileBuffers(TheFileHandle); if(TheReturn == 0) { Fail("ERROR: The FlushFileBuffers function returned 0, which " "indicates failure, when trying to flush a valid HANDLE. " "The GetLastError is %d.",GetLastError()); } /* test if flush modified the file */ finalSize = GetFileSize (TheFileHandle, NULL) ; if(finalSize==INVALID_FILE_SIZE) { Fail("ERROR: call to GetFileSize faild with error " "The GetLastError is %d.",GetLastError()); } if(finalSize!=(originalSize+strlen(SomeText))) { Fail("ERROR: FlushFileBuffers failed. data was not written to the file"); } /* Close the Handle */ TheReturn = CloseHandle(TheFileHandle); if(TheReturn == 0) { Fail("ERROR: CloseHandle failed. This function depends " "upon it."); } /* Test to see that FlushFileBuffers returns a failure value */ TheReturn = FlushFileBuffers(TheFileHandle); if(TheReturn != 0) { Fail("ERROR: The FlushFileBuffers function returned non-zero, " "which indicates success, when trying to flush an invalid " "HANDLE."); } /* make sure file does not exist */ if(DeleteFileA(fileName)== 0 ) { Fail("ERROR: call to DeleteFileA faild with error " "The GetLastError is %d.",GetLastError()); } PAL_Terminate(); return PASS; }
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/pal/tests/palsuite/composite/object_management/semaphore/shared/main.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source Code: main.c and semaphore.c ** main.c creates process and waits for all processes to get over ** semaphore.c creates a semaphore and then calls threads which will contend for the semaphore ** ** This test is for Object Management Test case for semaphore where Object type is shareable. ** Algorithm ** o Main Process Creates OBJECT_TYPE Object ** o Create PROCESS_COUNT processes aware of the Shared Object ** ** ** **============================================================ */ #include <palsuite.h> #include "resulttime.h" /* Test Input Variables */ unsigned int PROCESS_COUNT = 1; unsigned int THREAD_COUNT = 1; unsigned int REPEAT_COUNT = 4; unsigned int RELATION_ID = 1001; unsigned long lInitialCount = 1; /* Signaled */ unsigned long lMaximumCount = 1; /* Maximum value of 1 */ char objectSuffix[MAX_PATH]; struct TestStats{ DWORD operationTime; unsigned int relationId; unsigned int processCount; unsigned int threadCount; unsigned int repeatCount; char* buildNumber; }; int GetParameters( int argc, char **argv) { if( (!((argc == 5) || (argc == 6) ) )|| ((argc == 1) && !strcmp(argv[1],"/?")) || !strcmp(argv[1],"/h") || !strcmp(argv[1],"/H")) { printf("PAL -Composite Object Management event Test\n"); printf("Usage:\n"); printf("main\n\t[PROCESS_COUNT (greater than 1)] \n"); printf("\t[THREAD_COUNT (greater than 1)] \n"); printf("\t[REPEAT_COUNT (greater than 1)]\n"); printf("\t[RELATION_ID [greater than or equal to 1]\n"); printf("\t[Object Name Suffix]\n"); return -1; } PROCESS_COUNT = atoi(argv[1]); if( (PROCESS_COUNT < 1) || (PROCESS_COUNT > MAXIMUM_WAIT_OBJECTS) ) { printf("\nMain Process:Invalid PROCESS_COUNT number, Pass greater than 1 and less than PROCESS_COUNT %d\n", MAXIMUM_WAIT_OBJECTS); return -1; } THREAD_COUNT = atoi(argv[2]); if( (THREAD_COUNT < 1) || (THREAD_COUNT > MAXIMUM_WAIT_OBJECTS) ) { printf("\nInvalid THREAD_COUNT number, Pass greater than 1 and less than %d\n", MAXIMUM_WAIT_OBJECTS); return -1; } REPEAT_COUNT = atoi(argv[3]); if( REPEAT_COUNT < 1) { printf("\nMain Process:Invalid REPEAT_COUNT number, Pass greater than 1\n"); return -1; } RELATION_ID = atoi(argv[4]); if( RELATION_ID < 1) { printf("\nMain Process:Invalid RELATION_ID number, Pass greater than 1\n"); return -1; } if(argc == 6) { strncpy(objectSuffix, argv[5], MAX_PATH-1); } return 0; } PALTEST(composite_object_management_semaphore_shared_paltest_semaphore_shared, "composite/object_management/semaphore/shared/paltest_semaphore_shared") { unsigned int i = 0; HANDLE hProcess[MAXIMUM_WAIT_OBJECTS]; HANDLE hSemaphoreHandle; STARTUPINFO si[MAXIMUM_WAIT_OBJECTS]; PROCESS_INFORMATION pi[MAXIMUM_WAIT_OBJECTS]; char lpCommandLine[MAX_PATH] = ""; char ObjName[MAX_PATH] = "SHARED_SEMAPHORE"; int returnCode = 0; DWORD processReturnCode = 0; int testReturnCode = PASS; char fileName[MAX_PATH]; FILE *pFile = NULL; DWORD dwStartTime; struct TestStats testStats; if(0 != (PAL_Initialize(argc, argv))) { return ( FAIL ); } /* "While the new PAL does support named semaphore it's unclear if we should change the Windows PAL, since we share that w/ Rotor and they are still using the old PAL. For the time being it may make the most sense to just skip the named semaphore test on Windows - from an object management perspective it doesn't really gain us anything over what we already have." 
*/ ZeroMemory( objectSuffix, MAX_PATH ); if(GetParameters(argc, argv)) { Fail("Error in obtaining the parameters\n"); } if(argc == 6) { strncat(ObjName, objectSuffix, MAX_PATH - (sizeof(ObjName) + 1) ); } /* Register the start time */ dwStartTime = GetTickCount(); testStats.relationId = RELATION_ID; testStats.processCount = PROCESS_COUNT; testStats.threadCount = THREAD_COUNT; testStats.repeatCount = REPEAT_COUNT; testStats.buildNumber = getBuildNumber(); _snprintf(fileName, MAX_PATH, "main_semaphore_%d_.txt", RELATION_ID); pFile = fopen(fileName, "w+"); if(pFile == NULL) { Fail("Error in opening main file for write\n"); } hSemaphoreHandle = CreateSemaphore( NULL, /* lpSemaphoreAttributes */ lInitialCount, /*lInitialCount*/ lMaximumCount, /*lMaximumCount */ ObjName, 0, 0 ); if( hSemaphoreHandle == NULL) { Fail("Unable to create shared Semaphore handle @ Main returned error [%d]\n", GetLastError()); } for( i = 0; i < PROCESS_COUNT; i++ ) { ZeroMemory( lpCommandLine, MAX_PATH ); if ( _snprintf( lpCommandLine, MAX_PATH-1, "semaphore %d %d %d %d %s", i, THREAD_COUNT, REPEAT_COUNT, RELATION_ID, objectSuffix) < 0 ) { Fail("Error: Insufficient semaphore name string length for %s for iteration [%d]\n", ObjName, i); } /* Zero the data structure space */ ZeroMemory ( &pi[i], sizeof(pi[i]) ); ZeroMemory ( &si[i], sizeof(si[i]) ); /* Set the process flags and standard io handles */ si[i].cb = sizeof(si[i]); if(!CreateProcess( NULL, /* lpApplicationName*/ lpCommandLine, /* lpCommandLine */ NULL, /* lpProcessAttributes */ NULL, /* lpThreadAttributes */ TRUE, /* bInheritHandles */ 0, /* dwCreationFlags, */ NULL, /* lpEnvironment */ NULL, /* pCurrentDirectory */ &si[i], /* lpStartupInfo */ &pi[i] /* lpProcessInformation */ )) { Fail("Process Not created for [%d], the error code is [%d]\n", i, GetLastError()); } else { hProcess[i] = pi[i].hProcess; // Trace("Process created for [%d]\n", i); } } returnCode = WaitForMultipleObjects( PROCESS_COUNT, hProcess, TRUE, INFINITE); if( WAIT_OBJECT_0 != returnCode ) { Trace("Wait for Object(s) @ Main thread for %d processes returned %d, and GetLastError value is %d\n", PROCESS_COUNT, returnCode, GetLastError()); testReturnCode = FAIL; } for( i = 0; i < PROCESS_COUNT; i++ ) { /* check the exit code from the process */ if( ! GetExitCodeProcess( pi[i].hProcess, &processReturnCode ) ) { Trace( "GetExitCodeProcess call failed for iteration %d with error code %u\n", i, GetLastError() ); testReturnCode = FAIL; } if(processReturnCode == FAIL) { Trace( "Process [%d] failed and returned FAIL\n", i); testReturnCode = FAIL; } if(!CloseHandle(pi[i].hThread)) { Trace("Error:%d: CloseHandle failed for Process [%d] hThread\n", GetLastError(), i); testReturnCode = FAIL; } if(!CloseHandle(pi[i].hProcess) ) { Trace("Error:%d: CloseHandle failed for Process [%d] hProcess\n", GetLastError(), i); testReturnCode = FAIL; } } testStats.operationTime = GetTimeDiff(dwStartTime); fprintf(pFile, "%d,%d,%d,%d,%d,%s\n", testStats.operationTime, testStats.relationId, testStats.processCount, testStats.threadCount, testStats.repeatCount, testStats.buildNumber); if(fclose(pFile)) { Trace("Error: fclose failed for pFile\n"); testReturnCode = FAIL; }; if(!CloseHandle(hSemaphoreHandle)) { Trace("Error:%d: CloseHandle failed for hSemaphoreHandle\n", GetLastError()); testReturnCode = FAIL; } if( testReturnCode == PASS) { Trace("Test Passed\n"); } else { Trace("Test Failed\n"); } PAL_Terminate(); return testReturnCode; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source Code: main.c and semaphore.c ** main.c creates process and waits for all processes to get over ** semaphore.c creates a semaphore and then calls threads which will contend for the semaphore ** ** This test is for Object Management Test case for semaphore where Object type is shareable. ** Algorithm ** o Main Process Creates OBJECT_TYPE Object ** o Create PROCESS_COUNT processes aware of the Shared Object ** ** ** **============================================================ */ #include <palsuite.h> #include "resulttime.h" /* Test Input Variables */ unsigned int PROCESS_COUNT = 1; unsigned int THREAD_COUNT = 1; unsigned int REPEAT_COUNT = 4; unsigned int RELATION_ID = 1001; unsigned long lInitialCount = 1; /* Signaled */ unsigned long lMaximumCount = 1; /* Maximum value of 1 */ char objectSuffix[MAX_PATH]; struct TestStats{ DWORD operationTime; unsigned int relationId; unsigned int processCount; unsigned int threadCount; unsigned int repeatCount; char* buildNumber; }; int GetParameters( int argc, char **argv) { if( (!((argc == 5) || (argc == 6) ) )|| ((argc == 1) && !strcmp(argv[1],"/?")) || !strcmp(argv[1],"/h") || !strcmp(argv[1],"/H")) { printf("PAL -Composite Object Management event Test\n"); printf("Usage:\n"); printf("main\n\t[PROCESS_COUNT (greater than 1)] \n"); printf("\t[THREAD_COUNT (greater than 1)] \n"); printf("\t[REPEAT_COUNT (greater than 1)]\n"); printf("\t[RELATION_ID [greater than or equal to 1]\n"); printf("\t[Object Name Suffix]\n"); return -1; } PROCESS_COUNT = atoi(argv[1]); if( (PROCESS_COUNT < 1) || (PROCESS_COUNT > MAXIMUM_WAIT_OBJECTS) ) { printf("\nMain Process:Invalid PROCESS_COUNT number, Pass greater than 1 and less than PROCESS_COUNT %d\n", MAXIMUM_WAIT_OBJECTS); return -1; } THREAD_COUNT = atoi(argv[2]); if( (THREAD_COUNT < 1) || (THREAD_COUNT > MAXIMUM_WAIT_OBJECTS) ) { printf("\nInvalid THREAD_COUNT number, Pass greater than 1 and less than %d\n", MAXIMUM_WAIT_OBJECTS); return -1; } REPEAT_COUNT = atoi(argv[3]); if( REPEAT_COUNT < 1) { printf("\nMain Process:Invalid REPEAT_COUNT number, Pass greater than 1\n"); return -1; } RELATION_ID = atoi(argv[4]); if( RELATION_ID < 1) { printf("\nMain Process:Invalid RELATION_ID number, Pass greater than 1\n"); return -1; } if(argc == 6) { strncpy(objectSuffix, argv[5], MAX_PATH-1); } return 0; } PALTEST(composite_object_management_semaphore_shared_paltest_semaphore_shared, "composite/object_management/semaphore/shared/paltest_semaphore_shared") { unsigned int i = 0; HANDLE hProcess[MAXIMUM_WAIT_OBJECTS]; HANDLE hSemaphoreHandle; STARTUPINFO si[MAXIMUM_WAIT_OBJECTS]; PROCESS_INFORMATION pi[MAXIMUM_WAIT_OBJECTS]; char lpCommandLine[MAX_PATH] = ""; char ObjName[MAX_PATH] = "SHARED_SEMAPHORE"; int returnCode = 0; DWORD processReturnCode = 0; int testReturnCode = PASS; char fileName[MAX_PATH]; FILE *pFile = NULL; DWORD dwStartTime; struct TestStats testStats; if(0 != (PAL_Initialize(argc, argv))) { return ( FAIL ); } /* "While the new PAL does support named semaphore it's unclear if we should change the Windows PAL, since we share that w/ Rotor and they are still using the old PAL. For the time being it may make the most sense to just skip the named semaphore test on Windows - from an object management perspective it doesn't really gain us anything over what we already have." 
*/ ZeroMemory( objectSuffix, MAX_PATH ); if(GetParameters(argc, argv)) { Fail("Error in obtaining the parameters\n"); } if(argc == 6) { strncat(ObjName, objectSuffix, MAX_PATH - (sizeof(ObjName) + 1) ); } /* Register the start time */ dwStartTime = GetTickCount(); testStats.relationId = RELATION_ID; testStats.processCount = PROCESS_COUNT; testStats.threadCount = THREAD_COUNT; testStats.repeatCount = REPEAT_COUNT; testStats.buildNumber = getBuildNumber(); _snprintf(fileName, MAX_PATH, "main_semaphore_%d_.txt", RELATION_ID); pFile = fopen(fileName, "w+"); if(pFile == NULL) { Fail("Error in opening main file for write\n"); } hSemaphoreHandle = CreateSemaphore( NULL, /* lpSemaphoreAttributes */ lInitialCount, /*lInitialCount*/ lMaximumCount, /*lMaximumCount */ ObjName, 0, 0 ); if( hSemaphoreHandle == NULL) { Fail("Unable to create shared Semaphore handle @ Main returned error [%d]\n", GetLastError()); } for( i = 0; i < PROCESS_COUNT; i++ ) { ZeroMemory( lpCommandLine, MAX_PATH ); if ( _snprintf( lpCommandLine, MAX_PATH-1, "semaphore %d %d %d %d %s", i, THREAD_COUNT, REPEAT_COUNT, RELATION_ID, objectSuffix) < 0 ) { Fail("Error: Insufficient semaphore name string length for %s for iteration [%d]\n", ObjName, i); } /* Zero the data structure space */ ZeroMemory ( &pi[i], sizeof(pi[i]) ); ZeroMemory ( &si[i], sizeof(si[i]) ); /* Set the process flags and standard io handles */ si[i].cb = sizeof(si[i]); if(!CreateProcess( NULL, /* lpApplicationName*/ lpCommandLine, /* lpCommandLine */ NULL, /* lpProcessAttributes */ NULL, /* lpThreadAttributes */ TRUE, /* bInheritHandles */ 0, /* dwCreationFlags, */ NULL, /* lpEnvironment */ NULL, /* pCurrentDirectory */ &si[i], /* lpStartupInfo */ &pi[i] /* lpProcessInformation */ )) { Fail("Process Not created for [%d], the error code is [%d]\n", i, GetLastError()); } else { hProcess[i] = pi[i].hProcess; // Trace("Process created for [%d]\n", i); } } returnCode = WaitForMultipleObjects( PROCESS_COUNT, hProcess, TRUE, INFINITE); if( WAIT_OBJECT_0 != returnCode ) { Trace("Wait for Object(s) @ Main thread for %d processes returned %d, and GetLastError value is %d\n", PROCESS_COUNT, returnCode, GetLastError()); testReturnCode = FAIL; } for( i = 0; i < PROCESS_COUNT; i++ ) { /* check the exit code from the process */ if( ! GetExitCodeProcess( pi[i].hProcess, &processReturnCode ) ) { Trace( "GetExitCodeProcess call failed for iteration %d with error code %u\n", i, GetLastError() ); testReturnCode = FAIL; } if(processReturnCode == FAIL) { Trace( "Process [%d] failed and returned FAIL\n", i); testReturnCode = FAIL; } if(!CloseHandle(pi[i].hThread)) { Trace("Error:%d: CloseHandle failed for Process [%d] hThread\n", GetLastError(), i); testReturnCode = FAIL; } if(!CloseHandle(pi[i].hProcess) ) { Trace("Error:%d: CloseHandle failed for Process [%d] hProcess\n", GetLastError(), i); testReturnCode = FAIL; } } testStats.operationTime = GetTimeDiff(dwStartTime); fprintf(pFile, "%d,%d,%d,%d,%d,%s\n", testStats.operationTime, testStats.relationId, testStats.processCount, testStats.threadCount, testStats.repeatCount, testStats.buildNumber); if(fclose(pFile)) { Trace("Error: fclose failed for pFile\n"); testReturnCode = FAIL; }; if(!CloseHandle(hSemaphoreHandle)) { Trace("Error:%d: CloseHandle failed for hSemaphoreHandle\n", GetLastError()); testReturnCode = FAIL; } if( testReturnCode == PASS) { Trace("Test Passed\n"); } else { Trace("Test Failed\n"); } PAL_Terminate(); return testReturnCode; }
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/mono/mono/sgen/sgen-layout-stats.h
/** * \file * Copyright Xamarin Inc (http://www.xamarin.com) * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_SGEN_LAYOUT_STATS_H__ #define __MONO_SGEN_LAYOUT_STATS_H__ #ifdef SGEN_OBJECT_LAYOUT_STATISTICS #define SGEN_OBJECT_LAYOUT_BITMAP_BITS 16 void sgen_object_layout_scanned_bitmap (unsigned int bitmap); void sgen_object_layout_scanned_bitmap_overflow (void); void sgen_object_layout_scanned_ref_array (void); void sgen_object_layout_scanned_vtype_array (void); void sgen_object_layout_dump (FILE *out); #define SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP unsigned int __object_layout_bitmap = 0 #define SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP(o,p) do { \ int __index = ((void**)(p)) - ((void**)(((char*)(o)) + SGEN_CLIENT_OBJECT_HEADER_SIZE)); \ if (__index >= SGEN_OBJECT_LAYOUT_BITMAP_BITS) \ __object_layout_bitmap = (unsigned int)-1; \ else if (__object_layout_bitmap != (unsigned int)-1) \ __object_layout_bitmap |= (1 << __index); \ } while (0) #define SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP do { \ if (__object_layout_bitmap == (unsigned int)-1) \ sgen_object_layout_scanned_bitmap_overflow (); \ else \ sgen_object_layout_scanned_bitmap (__object_layout_bitmap); \ } while (0) #else #define sgen_object_layout_scanned_bitmap(bitmap) #define sgen_object_layout_scanned_bitmap_overflow() #define sgen_object_layout_scanned_ref_array() #define sgen_object_layout_scanned_vtype_array() #define sgen_object_layout_dump(out) #define SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP #define SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP(o,p) #define SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP #endif #endif
/** * \file * Copyright Xamarin Inc (http://www.xamarin.com) * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ #ifndef __MONO_SGEN_LAYOUT_STATS_H__ #define __MONO_SGEN_LAYOUT_STATS_H__ #ifdef SGEN_OBJECT_LAYOUT_STATISTICS #define SGEN_OBJECT_LAYOUT_BITMAP_BITS 16 void sgen_object_layout_scanned_bitmap (unsigned int bitmap); void sgen_object_layout_scanned_bitmap_overflow (void); void sgen_object_layout_scanned_ref_array (void); void sgen_object_layout_scanned_vtype_array (void); void sgen_object_layout_dump (FILE *out); #define SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP unsigned int __object_layout_bitmap = 0 #define SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP(o,p) do { \ int __index = ((void**)(p)) - ((void**)(((char*)(o)) + SGEN_CLIENT_OBJECT_HEADER_SIZE)); \ if (__index >= SGEN_OBJECT_LAYOUT_BITMAP_BITS) \ __object_layout_bitmap = (unsigned int)-1; \ else if (__object_layout_bitmap != (unsigned int)-1) \ __object_layout_bitmap |= (1 << __index); \ } while (0) #define SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP do { \ if (__object_layout_bitmap == (unsigned int)-1) \ sgen_object_layout_scanned_bitmap_overflow (); \ else \ sgen_object_layout_scanned_bitmap (__object_layout_bitmap); \ } while (0) #else #define sgen_object_layout_scanned_bitmap(bitmap) #define sgen_object_layout_scanned_bitmap_overflow() #define sgen_object_layout_scanned_ref_array() #define sgen_object_layout_scanned_vtype_array() #define sgen_object_layout_dump(out) #define SGEN_OBJECT_LAYOUT_STATISTICS_DECLARE_BITMAP #define SGEN_OBJECT_LAYOUT_STATISTICS_MARK_BITMAP(o,p) #define SGEN_OBJECT_LAYOUT_STATISTICS_COMMIT_BITMAP #endif #endif
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/pal/tests/palsuite/c_runtime/strtod/test2/test2.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test2.c ** ** Purpose: Tests strtod with overflows ** ** **===================================================================*/ #include <palsuite.h> PALTEST(c_runtime_strtod_test2_paltest_strtod_test2, "c_runtime/strtod/test2/paltest_strtod_test2") { /* Representation of positive infinity for an IEEE 64-bit double */ INT64 PosInfinity = (INT64)(0x7ff00000) << 32; double HugeVal = *(double*) &PosInfinity; char *PosStr = "1E+10000"; char *NegStr = "-1E+10000"; double result; if (PAL_Initialize(argc,argv)) { return FAIL; } result = strtod(PosStr, NULL); if (result != HugeVal) { Fail("ERROR: strtod interpreted \"%s\" as %g instead of %g\n", PosStr, result, HugeVal); } result = strtod(NegStr, NULL); if (result != -HugeVal) { Fail("ERROR: strtod interpreted \"%s\" as %g instead of %g\n", NegStr, result, -HugeVal); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: test2.c ** ** Purpose: Tests strtod with overflows ** ** **===================================================================*/ #include <palsuite.h> PALTEST(c_runtime_strtod_test2_paltest_strtod_test2, "c_runtime/strtod/test2/paltest_strtod_test2") { /* Representation of positive infinity for an IEEE 64-bit double */ INT64 PosInfinity = (INT64)(0x7ff00000) << 32; double HugeVal = *(double*) &PosInfinity; char *PosStr = "1E+10000"; char *NegStr = "-1E+10000"; double result; if (PAL_Initialize(argc,argv)) { return FAIL; } result = strtod(PosStr, NULL); if (result != HugeVal) { Fail("ERROR: strtod interpreted \"%s\" as %g instead of %g\n", PosStr, result, HugeVal); } result = strtod(NegStr, NULL); if (result != -HugeVal) { Fail("ERROR: strtod interpreted \"%s\" as %g instead of %g\n", NegStr, result, -HugeVal); } PAL_Terminate(); return PASS; }
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/tests/Interop/BestFitMapping/BestFitMappingNative.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdio.h> #include <stdlib.h> #include <locale.h> #include <xplatform.h> #include <platformdefines.h> #pragma warning( push ) #pragma warning( disable : 4996) static int fails = 0; //record the fail numbers // Overload methods for reportfailure static int ReportFailure(const char* s) { printf(" === Fail:%s\n", s); return (++fails); } extern "C" DLL_EXPORT int __cdecl GetResult() { return fails; } //This method is used on Windows Only extern "C" DLL_EXPORT char __cdecl GetByteForWideChar() { #ifdef WINDOWS char * p = new char[3]; WideCharToMultiByte(CP_ACP, 0, W("\x263c"), -1, p, 2, NULL, NULL); p[1] = '\0'; char breturn = p[0]; delete p; return breturn; #else return 0; //It wont be called MAC #endif } //x86: Managed(Encoding: utf8)---->Marshaler(Encoding:ASCII)---->Native(Encoding:utf8) //MAC(x64):Managed(Encoding:utf8)----->Marshaler(Encoding:utf8)---->Native(Encoding:utf8) //Now both side(Managed Side and native side) takes the utf8 encoding when comparing string bool CheckInput(LPSTR str) { //int WideCharToMultiByte( // UINT CodePage, // DWORD dwFlags, // LPCWSTR lpWideCharStr, // int cchWideChar, // LPSTR lpMultiByteStr, // int cbMultiByte, // LPCSTR lpDefaultChar, // LPBOOL lpUsedDefaultChar //); #ifdef WINDOWS char * p = new char[3]; WideCharToMultiByte(CP_ACP, 0, W("\x263c"), -1, p, 2, NULL, NULL); p[1] = '\0'; #else char* p = new char[4]; //00bc98e2,the utf8 code of "\263c",we can get these char value through the following code with C# p[0] = (char)0xe2; //Encoding enc = Encoding.Default;//UTF8 Encoding p[1] = (char)0x98; //Byte[] by = enc.GetBytes("\x263c"); p[2] = (char)0xbc; p[3] = (char)0; #endif if (0 != strncmp(str, p, 4)) { printf("CheckInput:Expected:%s,Actual:%d\n", p, str[0]); delete[]p; return false; } delete[]p; return true; } //C Call,In attribute,LPstr extern "C" DLL_EXPORT LPSTR __cdecl CLPStr_In(LPSTR pStr) { //Check the Input if (!CheckInput(pStr)) { ReportFailure("CLPStr_In:Native Side"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(pStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(sizeof(char) * len); strcpy(pBack, pStr); return pBack; } extern "C" DLL_EXPORT LPSTR __cdecl CLPStr_Out(LPSTR pStr) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(sizeof(char) * len); strncpy(pBack, pTemp, strlen(pTemp) + 1); strncpy(pStr, pTemp, strlen(pTemp) + 1); return pBack; } extern "C" DLL_EXPORT LPSTR __cdecl CLPStr_InOut(LPSTR pStr) { //Check the Input if (!CheckInput(pStr)) { ReportFailure("CLPStr_InOut:Native Side"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(pStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, pStr); return pBack; } extern "C" DLL_EXPORT LPSTR __cdecl CLPStr_InByRef(LPSTR* ppStr) { //Check the Input if (!CheckInput(*ppStr)) { ReportFailure("CLPStr_InByRef:Native Side"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(*ppStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, *ppStr); return pBack; } extern "C" DLL_EXPORT LPSTR __cdecl CLPStr_OutByRef(LPSTR* ppStr) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; //+1, Include the NULL Character. 
LPSTR pBack = (LPSTR)CoreClrAlloc(sizeof(char) * len); strncpy(pBack, pTemp, strlen(pTemp) + 1); *ppStr = (LPSTR)CoreClrAlloc(sizeof(char) * len); strncpy(*ppStr, pTemp, strlen(pTemp) + 1); return pBack; } extern "C" DLL_EXPORT LPSTR __cdecl CLPStr_InOutByRef(LPSTR* ppStr) { //Check the Input if (!CheckInput(*ppStr)) { ReportFailure("CLPStr_InOutByRef:Native Side"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(*ppStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, *ppStr); return pBack; } typedef LPSTR (__cdecl* delegate_cdecl)(LPSTR* ppstr); extern "C" DLL_EXPORT delegate_cdecl __cdecl CLPStr_DelegatePInvoke() { return CLPStr_InOutByRef; } //stdcall extern "C" DLL_EXPORT LPSTR STDMETHODCALLTYPE SLPStr_In(LPSTR pStr) { //Check the Input if (!CheckInput(pStr)) { ReportFailure("SLPStr_In:NativeSide"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(pStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, pStr); return pBack; } extern "C" DLL_EXPORT LPSTR STDMETHODCALLTYPE SLPStr_Out(LPSTR pStr) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(sizeof(char) * len); strncpy(pBack, pTemp, strlen(pTemp) + 1); strncpy(pStr, pTemp, strlen(pTemp) + 1); return pBack; } extern "C" DLL_EXPORT LPSTR STDMETHODCALLTYPE SLPStr_InOut(LPSTR pStr) { //Check the Input if (!CheckInput(pStr)) { ReportFailure("SLPStr_InOut:NativeSide"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(pStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, pStr); return pBack; } extern "C" DLL_EXPORT LPSTR STDMETHODCALLTYPE SLPStr_InByRef(LPSTR* ppStr) { //Check the Input if (!CheckInput(*ppStr)) { ReportFailure("SLPStr_InByRef:NativeSide"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(*ppStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, *ppStr); return pBack; } extern "C" DLL_EXPORT LPSTR STDMETHODCALLTYPE SLPStr_OutByRef(LPSTR* ppStr) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(sizeof(char) * len); strncpy(pBack, pTemp, strlen(pTemp) + 1); *ppStr = (LPSTR)CoreClrAlloc(sizeof(char) * len); strncpy(*ppStr, pTemp, strlen(pTemp) + 1); return pBack; } extern "C" DLL_EXPORT LPSTR STDMETHODCALLTYPE SLPStr_InOutByRef(LPSTR* ppStr) { //Check the Input if (!CheckInput(*ppStr)) { ReportFailure("SLPStr_InOutByRef:NativeSide"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(*ppStr) + 1; //+1, Include the NULL Character. 
LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, *ppStr); return pBack; } typedef LPSTR (STDMETHODCALLTYPE *delegate_stdcall)(LPSTR* ppstr); extern "C" DLL_EXPORT delegate_stdcall SLPStr_DelegatePInvoke() { return SLPStr_InOutByRef; } ///Cdecl, Reverse PInvoke typedef LPSTR (__cdecl *CCallBackIn)(LPSTR pstr); extern "C" DLL_EXPORT void __cdecl DoCCallBack_LPSTR_In(CCallBackIn callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp)+1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr,pTemp,len); if(!CheckInput(callback(pStr))) { ReportFailure("DoCCallBack_LPSTR_In:NativeSide"); } CoreClrFree(pStr); } typedef LPSTR (__cdecl *CCallBackOut)(LPSTR pstr); extern "C" DLL_EXPORT void __cdecl DoCCallBack_LPSTR_Out(CCallBackOut callback) { size_t len = 10; LPSTR pStr = (LPSTR)CoreClrAlloc(len); //Check the return value if (!CheckInput(callback(pStr))) { ReportFailure("DoCCallBack_LPSTR_Out:NativeSide,the first check"); } if (!CheckInput(pStr)) { ReportFailure("DoCCallBack_LPSTR_Out:NativeSide,the Second Check"); } CoreClrFree(pStr); } typedef LPSTR (__cdecl *CCallBackInOut)(LPSTR pstr); extern "C" DLL_EXPORT void __cdecl DoCCallBack_LPSTR_InOut(CCallBackInOut callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(pStr))) { ReportFailure("DoCCallBack_LPSTR_InOut:NativeSide,the first check"); } if (!CheckInput(pStr)) { ReportFailure("DoCCallBack_LPSTR_InOut:NativeSide,the Second Check"); } CoreClrFree(pStr); } typedef LPSTR (__cdecl *CallBackInByRef)(LPSTR* pstr); extern "C" DLL_EXPORT void __cdecl DoCCallBack_LPSTR_InByRef(CallBackInByRef callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(&pStr))) { ReportFailure("DoCCallBack_LPSTR_InByRef:NativeSide"); } CoreClrFree(pStr); } typedef LPSTR (__cdecl *CCallBackOutByRef)(LPSTR* pstr); extern "C" DLL_EXPORT void __cdecl DoCCallBack_LPSTR_OutByRef(CCallBackOutByRef callback) { size_t len = 10; LPSTR pStr = (LPSTR)CoreClrAlloc(len); if (!CheckInput(callback(&pStr))) { ReportFailure("DoCCallBack_LPSTR_OutByRef:NativeSide,the first Check"); } if (!CheckInput(pStr)) { ReportFailure("DoCCallBack_LPSTR_OutByRef:NativeSide,the Second Check"); } CoreClrFree(pStr); } typedef LPSTR (__cdecl *CCallBackInOutByRef)(LPSTR* pstr); extern "C" DLL_EXPORT void __cdecl DoCCallBack_LPSTR_InOutByRef(CCallBackInOutByRef callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(&pStr))) { ReportFailure("DoCCallBack_LPSTR_InOutByRef:NativeSide"); } if (!CheckInput(pStr)) { ReportFailure("DoCCallBack_LPSTR_InOutByRef:NativeSide,the Second Check"); } CoreClrFree(pStr); } ///STDCALL Reverse PInvoke typedef LPSTR (STDMETHODCALLTYPE *SCallBackIn)(LPSTR pstr); extern "C" DLL_EXPORT void __cdecl DoSCallBack_LPSTR_In(SCallBackIn callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(pStr))) { ReportFailure("DoSCallBack_LPSTR_In:NativeSide"); } CoreClrFree(pStr); } typedef LPSTR (STDMETHODCALLTYPE *SCallBackOut)(LPSTR pstr); extern "C" DLL_EXPORT void __cdecl DoSCallBack_LPSTR_Out(SCallBackOut callback) { size_t len = 10; LPSTR pStr = (LPSTR)CoreClrAlloc(len); if (!CheckInput(callback(pStr))) { ReportFailure("DoSCallBack_LPSTR_Out:NativeSide,the 
first check"); } if (!CheckInput(pStr)) { ReportFailure("DoSCallBack_LPSTR_Out:NativeSide,the Second Check"); } CoreClrFree(pStr); } typedef LPSTR (STDMETHODCALLTYPE *SCallBackInOut)(LPSTR pstr); extern "C" DLL_EXPORT void __cdecl DoSCallBack_LPSTR_InOut(SCallBackInOut callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(pStr))) { ReportFailure("DoSCallBack_LPSTR_InOut:NativeSide,the first check"); } if (!CheckInput(pStr)) { ReportFailure("DoSCallBack_LPSTR_InOut:NativeSide,the second Check"); } CoreClrFree(pStr); } typedef LPSTR (STDMETHODCALLTYPE *SCallBackInByRef)(LPSTR* pstr); extern "C" DLL_EXPORT void __cdecl DoSCallBack_LPSTR_InByRef(SCallBackInByRef callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(&pStr))) { ReportFailure("DoSCallBack_LPSTR_InByRef:NativeSide"); } CoreClrFree(pStr); } typedef LPSTR (STDMETHODCALLTYPE *SCallBackOutByRef)(LPSTR* pstr); extern "C" DLL_EXPORT void __cdecl DoSCallBack_LPSTR_OutByRef(SCallBackOutByRef callback) { size_t len = 10; LPSTR pStr = (LPSTR)CoreClrAlloc(len); if (!CheckInput(callback(&pStr))) { ReportFailure("DoSCallBack_LPSTR_OutByRef:NativeSide,the first check"); } if (!CheckInput(pStr)) { ReportFailure("DoSCallBack_LPSTR_OutByRef:NativeSide,the second Check"); } CoreClrFree(pStr); } typedef LPSTR (STDMETHODCALLTYPE *SCallBackInOutByRef)(LPSTR* pstr); extern "C" DLL_EXPORT void __cdecl DoSCallBack_LPSTR_InOutByRef(SCallBackInOutByRef callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(&pStr))) { ReportFailure("DoSCallBack_LPSTR_InOutByRef:NativeSide,the first check"); } if (!CheckInput(pStr)) { ReportFailure("DoSCallBack_LPSTR_InOutByRef:NativeSide,the second Check"); } CoreClrFree(pStr); } #pragma warning( pop )
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdio.h> #include <stdlib.h> #include <locale.h> #include <xplatform.h> #include <platformdefines.h> #pragma warning( push ) #pragma warning( disable : 4996) static int fails = 0; //record the fail numbers // Overload methods for reportfailure static int ReportFailure(const char* s) { printf(" === Fail:%s\n", s); return (++fails); } extern "C" DLL_EXPORT int __cdecl GetResult() { return fails; } //This method is used on Windows Only extern "C" DLL_EXPORT char __cdecl GetByteForWideChar() { #ifdef WINDOWS char * p = new char[3]; WideCharToMultiByte(CP_ACP, 0, W("\x263c"), -1, p, 2, NULL, NULL); p[1] = '\0'; char breturn = p[0]; delete p; return breturn; #else return 0; //It wont be called MAC #endif } //x86: Managed(Encoding: utf8)---->Marshaler(Encoding:ASCII)---->Native(Encoding:utf8) //MAC(x64):Managed(Encoding:utf8)----->Marshaler(Encoding:utf8)---->Native(Encoding:utf8) //Now both side(Managed Side and native side) takes the utf8 encoding when comparing string bool CheckInput(LPSTR str) { //int WideCharToMultiByte( // UINT CodePage, // DWORD dwFlags, // LPCWSTR lpWideCharStr, // int cchWideChar, // LPSTR lpMultiByteStr, // int cbMultiByte, // LPCSTR lpDefaultChar, // LPBOOL lpUsedDefaultChar //); #ifdef WINDOWS char * p = new char[3]; WideCharToMultiByte(CP_ACP, 0, W("\x263c"), -1, p, 2, NULL, NULL); p[1] = '\0'; #else char* p = new char[4]; //00bc98e2,the utf8 code of "\263c",we can get these char value through the following code with C# p[0] = (char)0xe2; //Encoding enc = Encoding.Default;//UTF8 Encoding p[1] = (char)0x98; //Byte[] by = enc.GetBytes("\x263c"); p[2] = (char)0xbc; p[3] = (char)0; #endif if (0 != strncmp(str, p, 4)) { printf("CheckInput:Expected:%s,Actual:%d\n", p, str[0]); delete[]p; return false; } delete[]p; return true; } //C Call,In attribute,LPstr extern "C" DLL_EXPORT LPSTR __cdecl CLPStr_In(LPSTR pStr) { //Check the Input if (!CheckInput(pStr)) { ReportFailure("CLPStr_In:Native Side"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(pStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(sizeof(char) * len); strcpy(pBack, pStr); return pBack; } extern "C" DLL_EXPORT LPSTR __cdecl CLPStr_Out(LPSTR pStr) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(sizeof(char) * len); strncpy(pBack, pTemp, strlen(pTemp) + 1); strncpy(pStr, pTemp, strlen(pTemp) + 1); return pBack; } extern "C" DLL_EXPORT LPSTR __cdecl CLPStr_InOut(LPSTR pStr) { //Check the Input if (!CheckInput(pStr)) { ReportFailure("CLPStr_InOut:Native Side"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(pStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, pStr); return pBack; } extern "C" DLL_EXPORT LPSTR __cdecl CLPStr_InByRef(LPSTR* ppStr) { //Check the Input if (!CheckInput(*ppStr)) { ReportFailure("CLPStr_InByRef:Native Side"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(*ppStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, *ppStr); return pBack; } extern "C" DLL_EXPORT LPSTR __cdecl CLPStr_OutByRef(LPSTR* ppStr) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; //+1, Include the NULL Character. 
LPSTR pBack = (LPSTR)CoreClrAlloc(sizeof(char) * len); strncpy(pBack, pTemp, strlen(pTemp) + 1); *ppStr = (LPSTR)CoreClrAlloc(sizeof(char) * len); strncpy(*ppStr, pTemp, strlen(pTemp) + 1); return pBack; } extern "C" DLL_EXPORT LPSTR __cdecl CLPStr_InOutByRef(LPSTR* ppStr) { //Check the Input if (!CheckInput(*ppStr)) { ReportFailure("CLPStr_InOutByRef:Native Side"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(*ppStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, *ppStr); return pBack; } typedef LPSTR (__cdecl* delegate_cdecl)(LPSTR* ppstr); extern "C" DLL_EXPORT delegate_cdecl __cdecl CLPStr_DelegatePInvoke() { return CLPStr_InOutByRef; } //stdcall extern "C" DLL_EXPORT LPSTR STDMETHODCALLTYPE SLPStr_In(LPSTR pStr) { //Check the Input if (!CheckInput(pStr)) { ReportFailure("SLPStr_In:NativeSide"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(pStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, pStr); return pBack; } extern "C" DLL_EXPORT LPSTR STDMETHODCALLTYPE SLPStr_Out(LPSTR pStr) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(sizeof(char) * len); strncpy(pBack, pTemp, strlen(pTemp) + 1); strncpy(pStr, pTemp, strlen(pTemp) + 1); return pBack; } extern "C" DLL_EXPORT LPSTR STDMETHODCALLTYPE SLPStr_InOut(LPSTR pStr) { //Check the Input if (!CheckInput(pStr)) { ReportFailure("SLPStr_InOut:NativeSide"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(pStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, pStr); return pBack; } extern "C" DLL_EXPORT LPSTR STDMETHODCALLTYPE SLPStr_InByRef(LPSTR* ppStr) { //Check the Input if (!CheckInput(*ppStr)) { ReportFailure("SLPStr_InByRef:NativeSide"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(*ppStr) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, *ppStr); return pBack; } extern "C" DLL_EXPORT LPSTR STDMETHODCALLTYPE SLPStr_OutByRef(LPSTR* ppStr) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; //+1, Include the NULL Character. LPSTR pBack = (LPSTR)CoreClrAlloc(sizeof(char) * len); strncpy(pBack, pTemp, strlen(pTemp) + 1); *ppStr = (LPSTR)CoreClrAlloc(sizeof(char) * len); strncpy(*ppStr, pTemp, strlen(pTemp) + 1); return pBack; } extern "C" DLL_EXPORT LPSTR STDMETHODCALLTYPE SLPStr_InOutByRef(LPSTR* ppStr) { //Check the Input if (!CheckInput(*ppStr)) { ReportFailure("SLPStr_InOutByRef:NativeSide"); } //alloc,copy, since we cannot depend the Marshaler's activity. size_t len = strlen(*ppStr) + 1; //+1, Include the NULL Character. 
LPSTR pBack = (LPSTR)CoreClrAlloc(len); strcpy(pBack, *ppStr); return pBack; } typedef LPSTR (STDMETHODCALLTYPE *delegate_stdcall)(LPSTR* ppstr); extern "C" DLL_EXPORT delegate_stdcall SLPStr_DelegatePInvoke() { return SLPStr_InOutByRef; } ///Cdecl, Reverse PInvoke typedef LPSTR (__cdecl *CCallBackIn)(LPSTR pstr); extern "C" DLL_EXPORT void __cdecl DoCCallBack_LPSTR_In(CCallBackIn callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp)+1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr,pTemp,len); if(!CheckInput(callback(pStr))) { ReportFailure("DoCCallBack_LPSTR_In:NativeSide"); } CoreClrFree(pStr); } typedef LPSTR (__cdecl *CCallBackOut)(LPSTR pstr); extern "C" DLL_EXPORT void __cdecl DoCCallBack_LPSTR_Out(CCallBackOut callback) { size_t len = 10; LPSTR pStr = (LPSTR)CoreClrAlloc(len); //Check the return value if (!CheckInput(callback(pStr))) { ReportFailure("DoCCallBack_LPSTR_Out:NativeSide,the first check"); } if (!CheckInput(pStr)) { ReportFailure("DoCCallBack_LPSTR_Out:NativeSide,the Second Check"); } CoreClrFree(pStr); } typedef LPSTR (__cdecl *CCallBackInOut)(LPSTR pstr); extern "C" DLL_EXPORT void __cdecl DoCCallBack_LPSTR_InOut(CCallBackInOut callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(pStr))) { ReportFailure("DoCCallBack_LPSTR_InOut:NativeSide,the first check"); } if (!CheckInput(pStr)) { ReportFailure("DoCCallBack_LPSTR_InOut:NativeSide,the Second Check"); } CoreClrFree(pStr); } typedef LPSTR (__cdecl *CallBackInByRef)(LPSTR* pstr); extern "C" DLL_EXPORT void __cdecl DoCCallBack_LPSTR_InByRef(CallBackInByRef callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(&pStr))) { ReportFailure("DoCCallBack_LPSTR_InByRef:NativeSide"); } CoreClrFree(pStr); } typedef LPSTR (__cdecl *CCallBackOutByRef)(LPSTR* pstr); extern "C" DLL_EXPORT void __cdecl DoCCallBack_LPSTR_OutByRef(CCallBackOutByRef callback) { size_t len = 10; LPSTR pStr = (LPSTR)CoreClrAlloc(len); if (!CheckInput(callback(&pStr))) { ReportFailure("DoCCallBack_LPSTR_OutByRef:NativeSide,the first Check"); } if (!CheckInput(pStr)) { ReportFailure("DoCCallBack_LPSTR_OutByRef:NativeSide,the Second Check"); } CoreClrFree(pStr); } typedef LPSTR (__cdecl *CCallBackInOutByRef)(LPSTR* pstr); extern "C" DLL_EXPORT void __cdecl DoCCallBack_LPSTR_InOutByRef(CCallBackInOutByRef callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(&pStr))) { ReportFailure("DoCCallBack_LPSTR_InOutByRef:NativeSide"); } if (!CheckInput(pStr)) { ReportFailure("DoCCallBack_LPSTR_InOutByRef:NativeSide,the Second Check"); } CoreClrFree(pStr); } ///STDCALL Reverse PInvoke typedef LPSTR (STDMETHODCALLTYPE *SCallBackIn)(LPSTR pstr); extern "C" DLL_EXPORT void __cdecl DoSCallBack_LPSTR_In(SCallBackIn callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(pStr))) { ReportFailure("DoSCallBack_LPSTR_In:NativeSide"); } CoreClrFree(pStr); } typedef LPSTR (STDMETHODCALLTYPE *SCallBackOut)(LPSTR pstr); extern "C" DLL_EXPORT void __cdecl DoSCallBack_LPSTR_Out(SCallBackOut callback) { size_t len = 10; LPSTR pStr = (LPSTR)CoreClrAlloc(len); if (!CheckInput(callback(pStr))) { ReportFailure("DoSCallBack_LPSTR_Out:NativeSide,the 
first check"); } if (!CheckInput(pStr)) { ReportFailure("DoSCallBack_LPSTR_Out:NativeSide,the Second Check"); } CoreClrFree(pStr); } typedef LPSTR (STDMETHODCALLTYPE *SCallBackInOut)(LPSTR pstr); extern "C" DLL_EXPORT void __cdecl DoSCallBack_LPSTR_InOut(SCallBackInOut callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(pStr))) { ReportFailure("DoSCallBack_LPSTR_InOut:NativeSide,the first check"); } if (!CheckInput(pStr)) { ReportFailure("DoSCallBack_LPSTR_InOut:NativeSide,the second Check"); } CoreClrFree(pStr); } typedef LPSTR (STDMETHODCALLTYPE *SCallBackInByRef)(LPSTR* pstr); extern "C" DLL_EXPORT void __cdecl DoSCallBack_LPSTR_InByRef(SCallBackInByRef callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(&pStr))) { ReportFailure("DoSCallBack_LPSTR_InByRef:NativeSide"); } CoreClrFree(pStr); } typedef LPSTR (STDMETHODCALLTYPE *SCallBackOutByRef)(LPSTR* pstr); extern "C" DLL_EXPORT void __cdecl DoSCallBack_LPSTR_OutByRef(SCallBackOutByRef callback) { size_t len = 10; LPSTR pStr = (LPSTR)CoreClrAlloc(len); if (!CheckInput(callback(&pStr))) { ReportFailure("DoSCallBack_LPSTR_OutByRef:NativeSide,the first check"); } if (!CheckInput(pStr)) { ReportFailure("DoSCallBack_LPSTR_OutByRef:NativeSide,the second Check"); } CoreClrFree(pStr); } typedef LPSTR (STDMETHODCALLTYPE *SCallBackInOutByRef)(LPSTR* pstr); extern "C" DLL_EXPORT void __cdecl DoSCallBack_LPSTR_InOutByRef(SCallBackInOutByRef callback) { const char* pTemp = "AAAA"; size_t len = strlen(pTemp) + 1; LPSTR pStr = (LPSTR)CoreClrAlloc(len); strncpy(pStr, pTemp, len); if (!CheckInput(callback(&pStr))) { ReportFailure("DoSCallBack_LPSTR_InOutByRef:NativeSide,the first check"); } if (!CheckInput(pStr)) { ReportFailure("DoSCallBack_LPSTR_InOutByRef:NativeSide,the second Check"); } CoreClrFree(pStr); } #pragma warning( pop )
-1
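The marshaling tests in the row above all funnel through CheckInput, which on non-Windows platforms compares the incoming LPSTR byte-for-byte against the UTF-8 encoding of U+263C. Below is a minimal standalone sketch of that comparison; MatchesSunUtf8 and the main driver are invented names for illustration and are not part of the test library.

#include <cstdio>
#include <cstring>

static bool MatchesSunUtf8(const char* str)
{
    // U+263C ("\x263c" in the tests above) encodes to E2 98 BC in UTF-8, plus a terminating NUL.
    const char expected[] = { (char)0xE2, (char)0x98, (char)0xBC, 0 };
    // Same strncmp(..., 4) pattern that CheckInput uses.
    return std::strncmp(str, expected, sizeof(expected)) == 0;
}

int main()
{
    const char sun[] = { (char)0xE2, (char)0x98, (char)0xBC, 0 };
    std::printf("matches: %d\n", MatchesSunUtf8(sun) ? 1 : 0);
    return 0;
}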
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/tools/StressLogAnalyzer/util.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. template<typename T> struct Volatile { T t; T Load() { return t; } }; typedef void* CRITSEC_COOKIE; #define STRESS_LOG_ANALYZER #include <malloc.h> #include "staticcontract.h" // This macro is used to standardize the wide character string literals between UNIX and Windows. // Unix L"" is UTF32, and on windows it's UTF16. Because of built-in assumptions on the size // of string literals, it's important to match behaviour between Unix and Windows. Unix will be defined // as u"" (char16_t) #ifdef TARGET_UNIX #define W(str) u##str #else // TARGET_UNIX #define W(str) L##str #endif // TARGET_UNIX //***************************************************************************** // // **** CQuickBytes // This helper class is useful for cases where 90% of the time you allocate 512 // or less bytes for a data structure. This class contains a 512 byte buffer. // Alloc() will return a pointer to this buffer if your allocation is small // enough, otherwise it asks the heap for a larger buffer which is freed for // you. No mutex locking is required for the small allocation case, making the // code run faster, less heap fragmentation, etc... Each instance will allocate // 520 bytes, so use accordinly. // //***************************************************************************** template <DWORD SIZE, DWORD INCREMENT> class CQuickBytesBase { public: CQuickBytesBase() : pbBuff(0), iSize(0), cbTotal(SIZE) { } void Destroy() { if (pbBuff) { delete[](BYTE*)pbBuff; pbBuff = 0; } } void* Alloc(SIZE_T iItems) { iSize = iItems; if (iItems <= SIZE) { cbTotal = SIZE; return (&rgData[0]); } else { if (pbBuff) delete[](BYTE*)pbBuff; pbBuff = new BYTE[iItems]; cbTotal = pbBuff ? iItems : 0; return (pbBuff); } } // This is for conformity to the CQuickBytesBase that is defined by the runtime so // that we can use it inside of some GC code that SOS seems to include as well. // // The plain vanilla "Alloc" version on this CQuickBytesBase doesn't throw either, // so we'll just forward the call. void* AllocNoThrow(SIZE_T iItems) { return Alloc(iItems); } HRESULT ReSize(SIZE_T iItems) { void* pbBuffNew; if (iItems <= cbTotal) { iSize = iItems; return NOERROR; } pbBuffNew = new BYTE[iItems + INCREMENT]; if (!pbBuffNew) return E_OUTOFMEMORY; if (pbBuff) { memcpy(pbBuffNew, pbBuff, cbTotal); delete[](BYTE*)pbBuff; } else { _ASSERTE(cbTotal == SIZE); memcpy(pbBuffNew, rgData, SIZE); } cbTotal = iItems + INCREMENT; iSize = iItems; pbBuff = pbBuffNew; return NOERROR; } operator PVOID() { return ((pbBuff) ? pbBuff : &rgData[0]); } void* Ptr() { return ((pbBuff) ? pbBuff : &rgData[0]); } SIZE_T Size() { return (iSize); } SIZE_T MaxSize() { return (cbTotal); } void* pbBuff; SIZE_T iSize; // number of bytes used SIZE_T cbTotal; // total bytes allocated in the buffer // use UINT64 to enforce the alignment of the memory UINT64 rgData[(SIZE + sizeof(UINT64) - 1) / sizeof(UINT64)]; }; #define CQUICKBYTES_BASE_SIZE 512 #define CQUICKBYTES_INCREMENTAL_SIZE 128 class CQuickBytesNoDtor : public CQuickBytesBase<CQUICKBYTES_BASE_SIZE, CQUICKBYTES_INCREMENTAL_SIZE> { }; class CQuickBytes : public CQuickBytesNoDtor { public: CQuickBytes() { } ~CQuickBytes() { Destroy(); } };
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. template<typename T> struct Volatile { T t; T Load() { return t; } }; typedef void* CRITSEC_COOKIE; #define STRESS_LOG_ANALYZER #include <malloc.h> #include "staticcontract.h" // This macro is used to standardize the wide character string literals between UNIX and Windows. // Unix L"" is UTF32, and on windows it's UTF16. Because of built-in assumptions on the size // of string literals, it's important to match behaviour between Unix and Windows. Unix will be defined // as u"" (char16_t) #ifdef TARGET_UNIX #define W(str) u##str #else // TARGET_UNIX #define W(str) L##str #endif // TARGET_UNIX //***************************************************************************** // // **** CQuickBytes // This helper class is useful for cases where 90% of the time you allocate 512 // or less bytes for a data structure. This class contains a 512 byte buffer. // Alloc() will return a pointer to this buffer if your allocation is small // enough, otherwise it asks the heap for a larger buffer which is freed for // you. No mutex locking is required for the small allocation case, making the // code run faster, less heap fragmentation, etc... Each instance will allocate // 520 bytes, so use accordinly. // //***************************************************************************** template <DWORD SIZE, DWORD INCREMENT> class CQuickBytesBase { public: CQuickBytesBase() : pbBuff(0), iSize(0), cbTotal(SIZE) { } void Destroy() { if (pbBuff) { delete[](BYTE*)pbBuff; pbBuff = 0; } } void* Alloc(SIZE_T iItems) { iSize = iItems; if (iItems <= SIZE) { cbTotal = SIZE; return (&rgData[0]); } else { if (pbBuff) delete[](BYTE*)pbBuff; pbBuff = new BYTE[iItems]; cbTotal = pbBuff ? iItems : 0; return (pbBuff); } } // This is for conformity to the CQuickBytesBase that is defined by the runtime so // that we can use it inside of some GC code that SOS seems to include as well. // // The plain vanilla "Alloc" version on this CQuickBytesBase doesn't throw either, // so we'll just forward the call. void* AllocNoThrow(SIZE_T iItems) { return Alloc(iItems); } HRESULT ReSize(SIZE_T iItems) { void* pbBuffNew; if (iItems <= cbTotal) { iSize = iItems; return NOERROR; } pbBuffNew = new BYTE[iItems + INCREMENT]; if (!pbBuffNew) return E_OUTOFMEMORY; if (pbBuff) { memcpy(pbBuffNew, pbBuff, cbTotal); delete[](BYTE*)pbBuff; } else { _ASSERTE(cbTotal == SIZE); memcpy(pbBuffNew, rgData, SIZE); } cbTotal = iItems + INCREMENT; iSize = iItems; pbBuff = pbBuffNew; return NOERROR; } operator PVOID() { return ((pbBuff) ? pbBuff : &rgData[0]); } void* Ptr() { return ((pbBuff) ? pbBuff : &rgData[0]); } SIZE_T Size() { return (iSize); } SIZE_T MaxSize() { return (cbTotal); } void* pbBuff; SIZE_T iSize; // number of bytes used SIZE_T cbTotal; // total bytes allocated in the buffer // use UINT64 to enforce the alignment of the memory UINT64 rgData[(SIZE + sizeof(UINT64) - 1) / sizeof(UINT64)]; }; #define CQUICKBYTES_BASE_SIZE 512 #define CQUICKBYTES_INCREMENTAL_SIZE 128 class CQuickBytesNoDtor : public CQuickBytesBase<CQUICKBYTES_BASE_SIZE, CQUICKBYTES_INCREMENTAL_SIZE> { }; class CQuickBytes : public CQuickBytesNoDtor { public: CQuickBytes() { } ~CQuickBytes() { Destroy(); } };
-1
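The StressLogAnalyzer header in the row above carries its own copy of the CQuickBytes helper. The following is a minimal usage sketch, not taken from the analyzer sources; the function name is invented, and it assumes the Windows-style HRESULT/NOERROR definitions pulled in by the surrounding headers.

#include <cstring>

void UseQuickBytes()
{
    CQuickBytes qb;

    // Requests up to 512 bytes are served from the inline buffer, no heap traffic.
    char* small = (char*)qb.Alloc(64);
    std::memset(small, 0, 64);

    // Larger requests move to a heap buffer; ReSize preserves the existing contents.
    if (qb.ReSize(2048) == NOERROR)
    {
        char* big = (char*)qb.Ptr();
        std::memset(big, 0, (size_t)qb.Size());
    }
}   // ~CQuickBytes runs Destroy() and releases any heap buffer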
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/native/libs/System.Native/pal_process.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_compiler.h" #include "pal_types.h" #include <stdio.h> #include <string.h> /** * Used by System.Diagnostics.Process.Start to fork/exec a new process. * * This function takes the place of directly using fork and execve from managed code, * in order to avoid executing managed code in the child process in the window between * fork and execve, which is not safe. * * As would have been the case with fork/execve, a return value of 0 is success and -1 * is failure; if failure, error information is provided in errno. */ PALEXPORT int32_t SystemNative_ForkAndExecProcess( const char* filename, // filename argument to execve char* const argv[], // argv argument to execve char* const envp[], // envp argument to execve const char* cwd, // path passed to chdir in child process int32_t redirectStdin, // whether to redirect standard input from the parent int32_t redirectStdout, // whether to redirect standard output to the parent int32_t redirectStderr, // whether to redirect standard error to the parent int32_t setCredentials, // whether to set the userId and groupId for the child process uint32_t userId, // the user id under which the child process should run uint32_t groupId, // the group id under which the child process should run uint32_t* groups, // the groups under which the child process should run int32_t groupsLength, // the length of groups int32_t* childPid, // [out] the child process' id int32_t* stdinFd, // [out] if redirectStdin, the parent's fd for the child's stdin int32_t* stdoutFd, // [out] if redirectStdout, the parent's fd for the child's stdout int32_t* stderrFd); // [out] if redirectStderr, the parent's fd for the child's stderr /************ * The values below in the header are fixed and correct for managed callers to use forever. * We must never change them. The implementation must either static_assert that they are equal * to the native equivalent OR convert them appropriately. */ /** * These values differ from OS to OS, so make a constant contract. * These values apply for the current process only */ typedef enum { PAL_RLIMIT_CPU = 0, // CPU limit in seconds PAL_RLIMIT_FSIZE = 1, // Largest file that can be created, in bytes PAL_RLIMIT_DATA = 2, // Maximum size of data segment, in bytes PAL_RLIMIT_STACK = 3, // Maximum size of stack segment, in bytes PAL_RLIMIT_CORE = 4, // Largest core file that can be created, in bytes PAL_RLIMIT_AS = 5, // Address space limit PAL_RLIMIT_RSS = 6, // Largest resident set size, in bytes PAL_RLIMIT_MEMLOCK = 7, // Locked-in-memory address space PAL_RLIMIT_NPROC = 8, // Number of processes PAL_RLIMIT_NOFILE = 9, // Number of open files } RLimitResources; typedef enum { PAL_NONE = 0, PAL_SIGKILL = 9, /* kill the specified process */ PAL_SIGSTOP = 19, } Signals; /** * Constants for passing to the first parameter of syslog. * These are a combination of flags where the lower bits are * the priority and the higher bits are the facility. The lower * bits cannot be OR'd together; they must be OR'd with the higer bits. 
* * These values keep their original definition and are taken from syslog.h */ typedef enum { // Priorities PAL_LOG_EMERG = 0, /* system is unusable */ PAL_LOG_ALERT = 1, /* action must be taken immediately */ PAL_LOG_CRIT = 2, /* critical conditions */ PAL_LOG_ERR = 3, /* error conditions */ PAL_LOG_WARNING = 4, /* warning conditions */ PAL_LOG_NOTICE = 5, /* normal but significant condition */ PAL_LOG_INFO = 6, /* informational */ PAL_LOG_DEBUG = 7, /* debug-level messages */ } SysLogPriority; /** * Constants to pass into pathconf. * * Note - these differ per OS so these values are the PAL-specific * values; they must be converted to the correct platform * values before passing to pathconf. */ typedef enum { PAL_PC_LINK_MAX = 1, PAL_PC_MAX_CANON = 2, PAL_PC_MAX_INPUT = 3, PAL_PC_NAME_MAX = 4, PAL_PC_PATH_MAX = 5, PAL_PC_PIPE_BUF = 6, PAL_PC_CHOWN_RESTRICTED = 7, PAL_PC_NO_TRUNC = 8, PAL_PC_VDISABLE = 9, } PathConfName; /** * Constants for passing to GetPriority and SetPriority. */ typedef enum { PAL_PRIO_PROCESS = 0, PAL_PRIO_PGRP = 1, PAL_PRIO_USER = 2, } PriorityWhich; /** * The current and maximum resource values for the current process. * These values are depict the resource according to the above enum. */ typedef struct { uint64_t CurrentLimit; uint64_t MaximumLimit; } RLimit; /** * The native struct is dependent on the size of a numeric type * so make it the largest possible value here and then we will * copy to native as necessary */ typedef struct { uint64_t Bits[16]; // __CPU_SETSIZE / (8 * sizeof(int64_t)) } CpuSetBits; /** * Get the current limit for the specified resource of the current process. * Returns 0 on success; returns -1 on failure and errno is set to the error reason. */ PALEXPORT int32_t SystemNative_GetRLimit(RLimitResources resourceType, RLimit* limits); /** * Set the soft and hard limits for the specified resource. * Only a super-user can increase hard limits for the current process. * Returns 0 on success; returns -1 on failure and errno is set to the error reason. */ PALEXPORT int32_t SystemNative_SetRLimit(RLimitResources resourceType, const RLimit* limits); /** * Kill the specified process (or process group) identified by the supplied pid; the * process or process group will be killed by the specified signal. * Returns 0 on success; on failure, -1 is returned and errno is set */ PALEXPORT int32_t SystemNative_Kill(int32_t pid, int32_t signal); /** * Returns the Process ID of the current executing process. * This call should never fail */ PALEXPORT int32_t SystemNative_GetPid(void); /** * Returns the sessions ID of the specified process; if 0 is passed in, returns the * session ID of the current process. * Returns a session ID on success; otherwise, returns -1 and sets errno. */ PALEXPORT int32_t SystemNative_GetSid(int32_t pid); /** * Write a message to the system logger, which in turn writes the message to the system console, log files, etc. * See man 3 syslog for more info */ PALEXPORT void SystemNative_SysLog(SysLogPriority priority, const char* message, const char* arg1); /** * Returns the pid of a terminated child without reaping it. * * 1) returns the process id of a terminated child process * 2) if no children are terminated, 0 is returned * 3) on error, -1 is returned */ PALEXPORT int32_t SystemNative_WaitIdAnyExitedNoHangNoWait(void); /** * Reaps a terminated child. 
* * 1) when a child is reaped, its process id is returned * 2) if pid is not a child or there are no unwaited-for children, -1 is returned (errno=ECHILD) * 3) if the child has not yet terminated, 0 is returned * 4) on error, -1 is returned. */ PALEXPORT int32_t SystemNative_WaitPidExitedNoHang(int32_t pid, int32_t* exitCode); /** * Gets the configurable limit or variable for system path or file descriptor options. * * Returns the requested variable value on success; if the variable does not have a limit, -1 is returned and errno * is not set; otherwise, -1 is returned and errno is set. */ PALEXPORT int64_t SystemNative_PathConf(const char* path, PathConfName name); /** * Gets the priority (nice value) of a certain execution group. * * Returns the nice value (from -20 to 20) of the group on success; otherwise, returns -1. Unfortunately, -1 is also a * valid nice value, meaning we can't use that value to determine valid output or not. Errno is set on failure so * we need to reset errno before a call and check the value if we get -1. */ PALEXPORT int32_t SystemNative_GetPriority(PriorityWhich which, int32_t who); /** * Sets the priority (nice value) of a certain execution group. * * Returns 0 on success; otherwise, -1 and errno is set. */ PALEXPORT int32_t SystemNative_SetPriority(PriorityWhich which, int32_t who, int32_t nice); /** * Gets the current working directory of the currently executing process. */ PALEXPORT char* SystemNative_GetCwd(char* buffer, int32_t bufferSize); /** * Sets the CPU affinity mask for a specified thread (or the current thread if 0). * * Returns 0 on success; otherwise, -1 is returned and errno is set */ PALEXPORT int32_t SystemNative_SchedSetAffinity(int32_t pid, intptr_t* mask); /** * Gets the affinity mask of the specified thread (or the current thread if 0). * * Returns 0 on success; otherwise, -1 is returned and errno is set. */ PALEXPORT int32_t SystemNative_SchedGetAffinity(int32_t pid, intptr_t* mask); /** * Returns the path of the executable that started the currently executing process, * resolving symbolic links. The caller is responsible for releasing the buffer. */ PALEXPORT char* SystemNative_GetProcessPath(void);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #pragma once #include "pal_compiler.h" #include "pal_types.h" #include <stdio.h> #include <string.h> /** * Used by System.Diagnostics.Process.Start to fork/exec a new process. * * This function takes the place of directly using fork and execve from managed code, * in order to avoid executing managed code in the child process in the window between * fork and execve, which is not safe. * * As would have been the case with fork/execve, a return value of 0 is success and -1 * is failure; if failure, error information is provided in errno. */ PALEXPORT int32_t SystemNative_ForkAndExecProcess( const char* filename, // filename argument to execve char* const argv[], // argv argument to execve char* const envp[], // envp argument to execve const char* cwd, // path passed to chdir in child process int32_t redirectStdin, // whether to redirect standard input from the parent int32_t redirectStdout, // whether to redirect standard output to the parent int32_t redirectStderr, // whether to redirect standard error to the parent int32_t setCredentials, // whether to set the userId and groupId for the child process uint32_t userId, // the user id under which the child process should run uint32_t groupId, // the group id under which the child process should run uint32_t* groups, // the groups under which the child process should run int32_t groupsLength, // the length of groups int32_t* childPid, // [out] the child process' id int32_t* stdinFd, // [out] if redirectStdin, the parent's fd for the child's stdin int32_t* stdoutFd, // [out] if redirectStdout, the parent's fd for the child's stdout int32_t* stderrFd); // [out] if redirectStderr, the parent's fd for the child's stderr /************ * The values below in the header are fixed and correct for managed callers to use forever. * We must never change them. The implementation must either static_assert that they are equal * to the native equivalent OR convert them appropriately. */ /** * These values differ from OS to OS, so make a constant contract. * These values apply for the current process only */ typedef enum { PAL_RLIMIT_CPU = 0, // CPU limit in seconds PAL_RLIMIT_FSIZE = 1, // Largest file that can be created, in bytes PAL_RLIMIT_DATA = 2, // Maximum size of data segment, in bytes PAL_RLIMIT_STACK = 3, // Maximum size of stack segment, in bytes PAL_RLIMIT_CORE = 4, // Largest core file that can be created, in bytes PAL_RLIMIT_AS = 5, // Address space limit PAL_RLIMIT_RSS = 6, // Largest resident set size, in bytes PAL_RLIMIT_MEMLOCK = 7, // Locked-in-memory address space PAL_RLIMIT_NPROC = 8, // Number of processes PAL_RLIMIT_NOFILE = 9, // Number of open files } RLimitResources; typedef enum { PAL_NONE = 0, PAL_SIGKILL = 9, /* kill the specified process */ PAL_SIGSTOP = 19, } Signals; /** * Constants for passing to the first parameter of syslog. * These are a combination of flags where the lower bits are * the priority and the higher bits are the facility. The lower * bits cannot be OR'd together; they must be OR'd with the higer bits. 
* * These values keep their original definition and are taken from syslog.h */ typedef enum { // Priorities PAL_LOG_EMERG = 0, /* system is unusable */ PAL_LOG_ALERT = 1, /* action must be taken immediately */ PAL_LOG_CRIT = 2, /* critical conditions */ PAL_LOG_ERR = 3, /* error conditions */ PAL_LOG_WARNING = 4, /* warning conditions */ PAL_LOG_NOTICE = 5, /* normal but significant condition */ PAL_LOG_INFO = 6, /* informational */ PAL_LOG_DEBUG = 7, /* debug-level messages */ } SysLogPriority; /** * Constants to pass into pathconf. * * Note - these differ per OS so these values are the PAL-specific * values; they must be converted to the correct platform * values before passing to pathconf. */ typedef enum { PAL_PC_LINK_MAX = 1, PAL_PC_MAX_CANON = 2, PAL_PC_MAX_INPUT = 3, PAL_PC_NAME_MAX = 4, PAL_PC_PATH_MAX = 5, PAL_PC_PIPE_BUF = 6, PAL_PC_CHOWN_RESTRICTED = 7, PAL_PC_NO_TRUNC = 8, PAL_PC_VDISABLE = 9, } PathConfName; /** * Constants for passing to GetPriority and SetPriority. */ typedef enum { PAL_PRIO_PROCESS = 0, PAL_PRIO_PGRP = 1, PAL_PRIO_USER = 2, } PriorityWhich; /** * The current and maximum resource values for the current process. * These values are depict the resource according to the above enum. */ typedef struct { uint64_t CurrentLimit; uint64_t MaximumLimit; } RLimit; /** * The native struct is dependent on the size of a numeric type * so make it the largest possible value here and then we will * copy to native as necessary */ typedef struct { uint64_t Bits[16]; // __CPU_SETSIZE / (8 * sizeof(int64_t)) } CpuSetBits; /** * Get the current limit for the specified resource of the current process. * Returns 0 on success; returns -1 on failure and errno is set to the error reason. */ PALEXPORT int32_t SystemNative_GetRLimit(RLimitResources resourceType, RLimit* limits); /** * Set the soft and hard limits for the specified resource. * Only a super-user can increase hard limits for the current process. * Returns 0 on success; returns -1 on failure and errno is set to the error reason. */ PALEXPORT int32_t SystemNative_SetRLimit(RLimitResources resourceType, const RLimit* limits); /** * Kill the specified process (or process group) identified by the supplied pid; the * process or process group will be killed by the specified signal. * Returns 0 on success; on failure, -1 is returned and errno is set */ PALEXPORT int32_t SystemNative_Kill(int32_t pid, int32_t signal); /** * Returns the Process ID of the current executing process. * This call should never fail */ PALEXPORT int32_t SystemNative_GetPid(void); /** * Returns the sessions ID of the specified process; if 0 is passed in, returns the * session ID of the current process. * Returns a session ID on success; otherwise, returns -1 and sets errno. */ PALEXPORT int32_t SystemNative_GetSid(int32_t pid); /** * Write a message to the system logger, which in turn writes the message to the system console, log files, etc. * See man 3 syslog for more info */ PALEXPORT void SystemNative_SysLog(SysLogPriority priority, const char* message, const char* arg1); /** * Returns the pid of a terminated child without reaping it. * * 1) returns the process id of a terminated child process * 2) if no children are terminated, 0 is returned * 3) on error, -1 is returned */ PALEXPORT int32_t SystemNative_WaitIdAnyExitedNoHangNoWait(void); /** * Reaps a terminated child. 
* * 1) when a child is reaped, its process id is returned * 2) if pid is not a child or there are no unwaited-for children, -1 is returned (errno=ECHILD) * 3) if the child has not yet terminated, 0 is returned * 4) on error, -1 is returned. */ PALEXPORT int32_t SystemNative_WaitPidExitedNoHang(int32_t pid, int32_t* exitCode); /** * Gets the configurable limit or variable for system path or file descriptor options. * * Returns the requested variable value on success; if the variable does not have a limit, -1 is returned and errno * is not set; otherwise, -1 is returned and errno is set. */ PALEXPORT int64_t SystemNative_PathConf(const char* path, PathConfName name); /** * Gets the priority (nice value) of a certain execution group. * * Returns the nice value (from -20 to 20) of the group on success; otherwise, returns -1. Unfortunately, -1 is also a * valid nice value, meaning we can't use that value to determine valid output or not. Errno is set on failure so * we need to reset errno before a call and check the value if we get -1. */ PALEXPORT int32_t SystemNative_GetPriority(PriorityWhich which, int32_t who); /** * Sets the priority (nice value) of a certain execution group. * * Returns 0 on success; otherwise, -1 and errno is set. */ PALEXPORT int32_t SystemNative_SetPriority(PriorityWhich which, int32_t who, int32_t nice); /** * Gets the current working directory of the currently executing process. */ PALEXPORT char* SystemNative_GetCwd(char* buffer, int32_t bufferSize); /** * Sets the CPU affinity mask for a specified thread (or the current thread if 0). * * Returns 0 on success; otherwise, -1 is returned and errno is set */ PALEXPORT int32_t SystemNative_SchedSetAffinity(int32_t pid, intptr_t* mask); /** * Gets the affinity mask of the specified thread (or the current thread if 0). * * Returns 0 on success; otherwise, -1 is returned and errno is set. */ PALEXPORT int32_t SystemNative_SchedGetAffinity(int32_t pid, intptr_t* mask); /** * Returns the path of the executable that started the currently executing process, * resolving symbolic links. The caller is responsible for releasing the buffer. */ PALEXPORT char* SystemNative_GetProcessPath(void);
-1
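The pal_process.h declarations in the row above document a "0 on success, -1 with errno set" contract for most exports. Below is a minimal caller sketch exercising two of those declarations; it is an assumption for illustration (PrintCpuLimit is an invented name), not code from the runtime's native sources.

#include <errno.h>
#include <stdio.h>
#include "pal_process.h"

static void PrintCpuLimit(void)
{
    RLimit limits;
    // Uses the PAL-stable PAL_RLIMIT_CPU constant rather than the OS-specific RLIMIT_CPU.
    if (SystemNative_GetRLimit(PAL_RLIMIT_CPU, &limits) == 0)
    {
        printf("pid %d: CPU soft limit %llu, hard limit %llu\n",
               SystemNative_GetPid(),
               (unsigned long long)limits.CurrentLimit,
               (unsigned long long)limits.MaximumLimit);
    }
    else
    {
        printf("SystemNative_GetRLimit failed, errno=%d\n", errno);
    }
}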
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/pal/tests/palsuite/exception_handling/pal_sxs/test1/exceptionsxs.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: exceptionsxs.c (exception_handling\pal_sxs\test1) ** ** Purpose: Test to make sure the PAL_EXCEPT block is executed ** after an exception occurs in the PAL_TRY block with ** multiple PALs in the process. ** ** **===================================================================*/ #include <stdio.h> #include <signal.h> #include <errno.h> #include <sys/ucontext.h> #include <sys/utsname.h> #include <unistd.h> enum { PASS = 0, FAIL = 1 }; extern "C" int InitializeDllTest1(); extern "C" int InitializeDllTest2(); extern "C" int DllTest1(); extern "C" int DllTest2(); bool bSignal = false; bool bCatch = false; bool bHandler = false; void sigsegv_handler(int code, siginfo_t *siginfo, void *context) { printf("pal_sxs test1: signal handler called\n"); bHandler = true; // Mark that the signal handler was executed if (!bSignal) { printf("ERROR: executed signal handler NOT from try/catch\n"); _exit(FAIL); } // Validate that the faulting address is correct; the contents of "p" (0x33000). if (siginfo->si_addr != (void *)0x33000) { printf("ERROR: signal handler faulting address != 0x33000\n"); _exit(FAIL); } // Unmask signal so we can receive it again sigset_t signal_set; sigemptyset(&signal_set); sigaddset(&signal_set, SIGSEGV); if (-1 == sigprocmask(SIG_UNBLOCK, &signal_set, NULL)) { printf("ERROR: sigprocmask failed; error is %d\n", errno); _exit(FAIL); } printf("Signal chaining PASSED\n"); _exit(PASS); } int main(int argc, char *argv[]) { struct sigaction newAction; struct sigaction oldAction; newAction.sa_flags = SA_SIGINFO | SA_RESTART; newAction.sa_handler = NULL; newAction.sa_sigaction = sigsegv_handler; sigemptyset(&newAction.sa_mask); if (-1 == sigaction(SIGSEGV, &newAction, &oldAction)) { printf("ERROR: sigaction failed; error is %d\n", errno); return FAIL; } printf("PAL_SXS test1 SIGSEGV handler %p\n", oldAction.sa_sigaction); if (0 != InitializeDllTest1()) { return FAIL; } if (0 != InitializeDllTest2()) { return FAIL; } // Test catching exceptions in other PAL instances DllTest2(); DllTest1(); DllTest2(); if (bHandler) { printf("ERROR: signal handler called by PAL sxs tests\n"); return FAIL; } printf("Starting PAL_SXS test1 signal chaining\n"); bSignal = true; volatile int* p = (volatile int *)0x33000; // Invalid pointer *p = 3; // Causes an access violation exception printf("ERROR: code was executed after the access violation.\n"); return FAIL; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: exceptionsxs.c (exception_handling\pal_sxs\test1) ** ** Purpose: Test to make sure the PAL_EXCEPT block is executed ** after an exception occurs in the PAL_TRY block with ** multiple PALs in the process. ** ** **===================================================================*/ #include <stdio.h> #include <signal.h> #include <errno.h> #include <sys/ucontext.h> #include <sys/utsname.h> #include <unistd.h> enum { PASS = 0, FAIL = 1 }; extern "C" int InitializeDllTest1(); extern "C" int InitializeDllTest2(); extern "C" int DllTest1(); extern "C" int DllTest2(); bool bSignal = false; bool bCatch = false; bool bHandler = false; void sigsegv_handler(int code, siginfo_t *siginfo, void *context) { printf("pal_sxs test1: signal handler called\n"); bHandler = true; // Mark that the signal handler was executed if (!bSignal) { printf("ERROR: executed signal handler NOT from try/catch\n"); _exit(FAIL); } // Validate that the faulting address is correct; the contents of "p" (0x33000). if (siginfo->si_addr != (void *)0x33000) { printf("ERROR: signal handler faulting address != 0x33000\n"); _exit(FAIL); } // Unmask signal so we can receive it again sigset_t signal_set; sigemptyset(&signal_set); sigaddset(&signal_set, SIGSEGV); if (-1 == sigprocmask(SIG_UNBLOCK, &signal_set, NULL)) { printf("ERROR: sigprocmask failed; error is %d\n", errno); _exit(FAIL); } printf("Signal chaining PASSED\n"); _exit(PASS); } int main(int argc, char *argv[]) { struct sigaction newAction; struct sigaction oldAction; newAction.sa_flags = SA_SIGINFO | SA_RESTART; newAction.sa_handler = NULL; newAction.sa_sigaction = sigsegv_handler; sigemptyset(&newAction.sa_mask); if (-1 == sigaction(SIGSEGV, &newAction, &oldAction)) { printf("ERROR: sigaction failed; error is %d\n", errno); return FAIL; } printf("PAL_SXS test1 SIGSEGV handler %p\n", oldAction.sa_sigaction); if (0 != InitializeDllTest1()) { return FAIL; } if (0 != InitializeDllTest2()) { return FAIL; } // Test catching exceptions in other PAL instances DllTest2(); DllTest1(); DllTest2(); if (bHandler) { printf("ERROR: signal handler called by PAL sxs tests\n"); return FAIL; } printf("Starting PAL_SXS test1 signal chaining\n"); bSignal = true; volatile int* p = (volatile int *)0x33000; // Invalid pointer *p = 3; // Causes an access violation exception printf("ERROR: code was executed after the access violation.\n"); return FAIL; }
-1
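The PAL_SXS test in the row above checks that a SIGSEGV raised outside any PAL try/catch reaches the handler installed before the PAL instances were initialized. The sketch below shows the general sigaction chaining pattern that kind of test relies on; it is an assumption about the usual technique, with invented names, and is not code from the PAL itself.

#include <signal.h>
#include <unistd.h>

static struct sigaction g_previousSegvAction;   // saved by InstallChainingHandler

static void ChainingSegvHandler(int code, siginfo_t* info, void* context)
{
    // Forward the fault to whatever handler was installed before ours.
    if ((g_previousSegvAction.sa_flags & SA_SIGINFO) &&
        g_previousSegvAction.sa_sigaction != NULL)
    {
        g_previousSegvAction.sa_sigaction(code, info, context);
    }
    else if (g_previousSegvAction.sa_handler != SIG_IGN &&
             g_previousSegvAction.sa_handler != SIG_DFL)
    {
        g_previousSegvAction.sa_handler(code);
    }
    else
    {
        _exit(1);   // nothing to chain to; fail fast like the test above
    }
}

static int InstallChainingHandler(void)
{
    struct sigaction action;
    action.sa_flags = SA_SIGINFO | SA_RESTART;
    action.sa_sigaction = ChainingSegvHandler;
    sigemptyset(&action.sa_mask);
    // The previous disposition is captured so the handler can forward to it.
    return sigaction(SIGSEGV, &action, &g_previousSegvAction);
}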
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/vm/arm64/profiler.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" #ifdef PROFILING_SUPPORTED #include "proftoeeinterfaceimpl.h" #define PROFILE_ENTER 1 #define PROFILE_LEAVE 2 #define PROFILE_TAILCALL 4 #define PROFILE_PLATFORM_SPECIFIC_DATA_BUFFER_SIZE (NUM_FLOAT_ARGUMENT_REGISTERS * sizeof(double)) typedef struct _PROFILE_PLATFORM_SPECIFIC_DATA { void* Fp; void* Pc; void* x8; ArgumentRegisters argumentRegisters; FunctionID functionId; FloatArgumentRegisters floatArgumentRegisters; void* probeSp; void* profiledSp; void* hiddenArg; UINT32 flags; UINT32 unused; BYTE buffer[PROFILE_PLATFORM_SPECIFIC_DATA_BUFFER_SIZE]; } PROFILE_PLATFORM_SPECIFIC_DATA, *PPROFILE_PLATFORM_SPECIFIC_DATA; UINT_PTR ProfileGetIPFromPlatformSpecificHandle(void* pPlatformSpecificHandle) { LIMITED_METHOD_CONTRACT; PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(pPlatformSpecificHandle); return (UINT_PTR)pData->Pc; } void ProfileSetFunctionIDInPlatformSpecificHandle(void* pPlatformSpecificHandle, FunctionID functionId) { LIMITED_METHOD_CONTRACT; _ASSERTE(pPlatformSpecificHandle != nullptr); _ASSERTE(functionId != 0); PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(pPlatformSpecificHandle); pData->functionId = functionId; } ProfileArgIterator::ProfileArgIterator(MetaSig* pSig, void* pPlatformSpecificHandle) : m_argIterator(pSig), m_bufferPos(0) { WRAPPER_NO_CONTRACT; _ASSERTE(pSig != nullptr); _ASSERTE(pPlatformSpecificHandle != nullptr); m_handle = pPlatformSpecificHandle; PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(pPlatformSpecificHandle); ZeroMemory(pData->buffer, PROFILE_PLATFORM_SPECIFIC_DATA_BUFFER_SIZE); #ifdef _DEBUG // Unwind a frame and get the SP for the profiled method to make sure it matches // what the JIT gave us // Setup the context to represent the frame that called ProfileEnterNaked CONTEXT ctx; memset(&ctx, 0, sizeof(CONTEXT)); ctx.Sp = (DWORD64)pData->probeSp; ctx.Fp = (DWORD64)pData->Fp; ctx.Pc = (DWORD64)pData->Pc; // Walk up a frame to the caller frame (called the managed method which called ProfileEnterNaked) Thread::VirtualUnwindCallFrame(&ctx); _ASSERTE(pData->profiledSp == (void*)ctx.Sp); #endif // Get the hidden arg if there is one MethodDesc* pMD = FunctionIdToMethodDesc(pData->functionId); if ((pData->hiddenArg == nullptr) && (pMD->RequiresInstArg() || pMD->AcquiresInstMethodTableFromThis())) { if ((pData->flags & PROFILE_ENTER) != 0) { if (pMD->AcquiresInstMethodTableFromThis()) { pData->hiddenArg = GetThis(); } else { // On ARM64 the generic instantiation parameter comes after the optional "this" pointer. if (m_argIterator.HasThis()) { pData->hiddenArg = (void*)pData->argumentRegisters.x[1]; } else { pData->hiddenArg = (void*)pData->argumentRegisters.x[0]; } } } else { EECodeInfo codeInfo((PCODE)pData->Pc); // We want to pass the caller SP here. 
pData->hiddenArg = EECodeManager::GetExactGenericsToken((SIZE_T)(pData->profiledSp), &codeInfo); } } } ProfileArgIterator::~ProfileArgIterator() { LIMITED_METHOD_CONTRACT; m_handle = nullptr; } LPVOID ProfileArgIterator::CopyStructFromFPRegs(int firstFPReg, int numFPRegs, int hfaFieldSize) { WRAPPER_NO_CONTRACT; PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(m_handle); if (hfaFieldSize == 8) { UINT64* pDest = (UINT64*)&pData->buffer[m_bufferPos]; for (int i = 0; i < numFPRegs; ++i) { pDest[i] = (UINT64)pData->floatArgumentRegisters.q[firstFPReg + i].Low; } m_bufferPos += numFPRegs * sizeof(UINT64); return pDest; } else { _ASSERTE(hfaFieldSize == 4); UINT32* pDest = (UINT32*)&pData->buffer[m_bufferPos]; for (int i = 0; i < numFPRegs; ++i) { pDest[i] = (UINT32)pData->floatArgumentRegisters.q[firstFPReg + i].Low; } m_bufferPos += numFPRegs * sizeof(UINT32); return pDest; } } LPVOID ProfileArgIterator::GetNextArgAddr() { WRAPPER_NO_CONTRACT; _ASSERTE(m_handle != nullptr); PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(m_handle); if ((pData->flags & (PROFILE_LEAVE | PROFILE_TAILCALL)) != 0) { _ASSERTE(!"GetNextArgAddr() - arguments are not available in leave and tailcall probes"); return nullptr; } int argOffset = m_argIterator.GetNextOffset(); if (argOffset == TransitionBlock::InvalidOffset) { return nullptr; } if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset)) { ArgLocDesc argLocDesc; m_argIterator.GetArgLoc(argOffset, &argLocDesc); if (argLocDesc.m_cFloatReg > 1) { if (argLocDesc.m_hfaFieldSize != 16) { return CopyStructFromFPRegs(argLocDesc.m_idxFloatReg, argLocDesc.m_cFloatReg, argLocDesc.m_hfaFieldSize); } } #ifdef _DEBUG else { _ASSERTE(argLocDesc.m_cFloatReg == 1); } #endif return (LPBYTE)&pData->floatArgumentRegisters.q[argLocDesc.m_idxFloatReg]; } LPVOID pArg = nullptr; if (TransitionBlock::IsArgumentRegisterOffset(argOffset)) { pArg = (LPBYTE)&pData->argumentRegisters + (argOffset - TransitionBlock::GetOffsetOfArgumentRegisters()); } else { _ASSERTE(TransitionBlock::IsStackArgumentOffset(argOffset)); pArg = (LPBYTE)pData->profiledSp + (argOffset - TransitionBlock::GetOffsetOfArgs()); } if (m_argIterator.IsArgPassedByRef()) { pArg = *(LPVOID*)pArg; } return pArg; } LPVOID ProfileArgIterator::GetHiddenArgValue(void) { LIMITED_METHOD_CONTRACT; PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(m_handle); return pData->hiddenArg; } LPVOID ProfileArgIterator::GetThis(void) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle; MethodDesc* pMD = FunctionIdToMethodDesc(pData->functionId); // We guarantee to return the correct "this" pointer in the enter probe. // For the leave and tailcall probes, we only return a valid "this" pointer if it is the generics token. 
if (pData->hiddenArg != nullptr) { if (pMD->AcquiresInstMethodTableFromThis()) { return pData->hiddenArg; } } if ((pData->flags & PROFILE_ENTER) != 0) { if (m_argIterator.HasThis()) { return (LPVOID)pData->argumentRegisters.x[0]; } } return nullptr; } LPVOID ProfileArgIterator::GetReturnBufferAddr(void) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(m_handle); if ((pData->flags & PROFILE_TAILCALL) != 0) { _ASSERTE(!"GetReturnBufferAddr() - return buffer address is not available in tailcall probe"); return nullptr; } if (m_argIterator.HasRetBuffArg()) { if ((pData->flags & PROFILE_ENTER) != 0) { return (LPVOID)pData->x8; } else { // On ARM64 there is no requirement for the method to preserve the value stored in x8. // In order to workaround this JIT will explicitly return the return buffer address in x0. _ASSERTE((pData->flags & PROFILE_LEAVE) != 0); return (LPVOID)pData->argumentRegisters.x[0]; } } UINT fpReturnSize = m_argIterator.GetFPReturnSize(); if (fpReturnSize != 0) { TypeHandle thReturnValueType; m_argIterator.GetSig()->GetReturnTypeNormalized(&thReturnValueType); if (!thReturnValueType.IsNull() && thReturnValueType.IsHFA()) { CorInfoHFAElemType hfaElemType = thReturnValueType.GetHFAType(); if (hfaElemType == CORINFO_HFA_ELEM_VECTOR128) { return &pData->floatArgumentRegisters.q[0]; } else { int hfaFieldSize = 8; if (hfaElemType == CORINFO_HFA_ELEM_FLOAT) { hfaFieldSize = 4; } #ifdef _DEBUG else { _ASSERTE((hfaElemType == CORINFO_HFA_ELEM_DOUBLE) || (hfaElemType == CORINFO_HFA_ELEM_VECTOR64)); } #endif const int cntFPRegs = thReturnValueType.GetSize() / hfaFieldSize; // On Arm64 HFA and HVA values are returned in s0-s3, d0-d3, or v0-v3. return CopyStructFromFPRegs(0, cntFPRegs, hfaFieldSize); } } return &pData->floatArgumentRegisters.q[0]; } if (!m_argIterator.GetSig()->IsReturnTypeVoid()) { return &pData->argumentRegisters.x[0]; } return nullptr; } #undef PROFILE_ENTER #undef PROFILE_LEAVE #undef PROFILE_TAILCALL #endif // PROFILING_SUPPORTED
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "common.h" #ifdef PROFILING_SUPPORTED #include "proftoeeinterfaceimpl.h" #define PROFILE_ENTER 1 #define PROFILE_LEAVE 2 #define PROFILE_TAILCALL 4 #define PROFILE_PLATFORM_SPECIFIC_DATA_BUFFER_SIZE (NUM_FLOAT_ARGUMENT_REGISTERS * sizeof(double)) typedef struct _PROFILE_PLATFORM_SPECIFIC_DATA { void* Fp; void* Pc; void* x8; ArgumentRegisters argumentRegisters; FunctionID functionId; FloatArgumentRegisters floatArgumentRegisters; void* probeSp; void* profiledSp; void* hiddenArg; UINT32 flags; UINT32 unused; BYTE buffer[PROFILE_PLATFORM_SPECIFIC_DATA_BUFFER_SIZE]; } PROFILE_PLATFORM_SPECIFIC_DATA, *PPROFILE_PLATFORM_SPECIFIC_DATA; UINT_PTR ProfileGetIPFromPlatformSpecificHandle(void* pPlatformSpecificHandle) { LIMITED_METHOD_CONTRACT; PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(pPlatformSpecificHandle); return (UINT_PTR)pData->Pc; } void ProfileSetFunctionIDInPlatformSpecificHandle(void* pPlatformSpecificHandle, FunctionID functionId) { LIMITED_METHOD_CONTRACT; _ASSERTE(pPlatformSpecificHandle != nullptr); _ASSERTE(functionId != 0); PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(pPlatformSpecificHandle); pData->functionId = functionId; } ProfileArgIterator::ProfileArgIterator(MetaSig* pSig, void* pPlatformSpecificHandle) : m_argIterator(pSig), m_bufferPos(0) { WRAPPER_NO_CONTRACT; _ASSERTE(pSig != nullptr); _ASSERTE(pPlatformSpecificHandle != nullptr); m_handle = pPlatformSpecificHandle; PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(pPlatformSpecificHandle); ZeroMemory(pData->buffer, PROFILE_PLATFORM_SPECIFIC_DATA_BUFFER_SIZE); #ifdef _DEBUG // Unwind a frame and get the SP for the profiled method to make sure it matches // what the JIT gave us // Setup the context to represent the frame that called ProfileEnterNaked CONTEXT ctx; memset(&ctx, 0, sizeof(CONTEXT)); ctx.Sp = (DWORD64)pData->probeSp; ctx.Fp = (DWORD64)pData->Fp; ctx.Pc = (DWORD64)pData->Pc; // Walk up a frame to the caller frame (called the managed method which called ProfileEnterNaked) Thread::VirtualUnwindCallFrame(&ctx); _ASSERTE(pData->profiledSp == (void*)ctx.Sp); #endif // Get the hidden arg if there is one MethodDesc* pMD = FunctionIdToMethodDesc(pData->functionId); if ((pData->hiddenArg == nullptr) && (pMD->RequiresInstArg() || pMD->AcquiresInstMethodTableFromThis())) { if ((pData->flags & PROFILE_ENTER) != 0) { if (pMD->AcquiresInstMethodTableFromThis()) { pData->hiddenArg = GetThis(); } else { // On ARM64 the generic instantiation parameter comes after the optional "this" pointer. if (m_argIterator.HasThis()) { pData->hiddenArg = (void*)pData->argumentRegisters.x[1]; } else { pData->hiddenArg = (void*)pData->argumentRegisters.x[0]; } } } else { EECodeInfo codeInfo((PCODE)pData->Pc); // We want to pass the caller SP here. 
pData->hiddenArg = EECodeManager::GetExactGenericsToken((SIZE_T)(pData->profiledSp), &codeInfo); } } } ProfileArgIterator::~ProfileArgIterator() { LIMITED_METHOD_CONTRACT; m_handle = nullptr; } LPVOID ProfileArgIterator::CopyStructFromFPRegs(int firstFPReg, int numFPRegs, int hfaFieldSize) { WRAPPER_NO_CONTRACT; PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(m_handle); if (hfaFieldSize == 8) { UINT64* pDest = (UINT64*)&pData->buffer[m_bufferPos]; for (int i = 0; i < numFPRegs; ++i) { pDest[i] = (UINT64)pData->floatArgumentRegisters.q[firstFPReg + i].Low; } m_bufferPos += numFPRegs * sizeof(UINT64); return pDest; } else { _ASSERTE(hfaFieldSize == 4); UINT32* pDest = (UINT32*)&pData->buffer[m_bufferPos]; for (int i = 0; i < numFPRegs; ++i) { pDest[i] = (UINT32)pData->floatArgumentRegisters.q[firstFPReg + i].Low; } m_bufferPos += numFPRegs * sizeof(UINT32); return pDest; } } LPVOID ProfileArgIterator::GetNextArgAddr() { WRAPPER_NO_CONTRACT; _ASSERTE(m_handle != nullptr); PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(m_handle); if ((pData->flags & (PROFILE_LEAVE | PROFILE_TAILCALL)) != 0) { _ASSERTE(!"GetNextArgAddr() - arguments are not available in leave and tailcall probes"); return nullptr; } int argOffset = m_argIterator.GetNextOffset(); if (argOffset == TransitionBlock::InvalidOffset) { return nullptr; } if (TransitionBlock::IsFloatArgumentRegisterOffset(argOffset)) { ArgLocDesc argLocDesc; m_argIterator.GetArgLoc(argOffset, &argLocDesc); if (argLocDesc.m_cFloatReg > 1) { if (argLocDesc.m_hfaFieldSize != 16) { return CopyStructFromFPRegs(argLocDesc.m_idxFloatReg, argLocDesc.m_cFloatReg, argLocDesc.m_hfaFieldSize); } } #ifdef _DEBUG else { _ASSERTE(argLocDesc.m_cFloatReg == 1); } #endif return (LPBYTE)&pData->floatArgumentRegisters.q[argLocDesc.m_idxFloatReg]; } LPVOID pArg = nullptr; if (TransitionBlock::IsArgumentRegisterOffset(argOffset)) { pArg = (LPBYTE)&pData->argumentRegisters + (argOffset - TransitionBlock::GetOffsetOfArgumentRegisters()); } else { _ASSERTE(TransitionBlock::IsStackArgumentOffset(argOffset)); pArg = (LPBYTE)pData->profiledSp + (argOffset - TransitionBlock::GetOffsetOfArgs()); } if (m_argIterator.IsArgPassedByRef()) { pArg = *(LPVOID*)pArg; } return pArg; } LPVOID ProfileArgIterator::GetHiddenArgValue(void) { LIMITED_METHOD_CONTRACT; PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(m_handle); return pData->hiddenArg; } LPVOID ProfileArgIterator::GetThis(void) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; PROFILE_PLATFORM_SPECIFIC_DATA* pData = (PROFILE_PLATFORM_SPECIFIC_DATA*)m_handle; MethodDesc* pMD = FunctionIdToMethodDesc(pData->functionId); // We guarantee to return the correct "this" pointer in the enter probe. // For the leave and tailcall probes, we only return a valid "this" pointer if it is the generics token. 
if (pData->hiddenArg != nullptr) { if (pMD->AcquiresInstMethodTableFromThis()) { return pData->hiddenArg; } } if ((pData->flags & PROFILE_ENTER) != 0) { if (m_argIterator.HasThis()) { return (LPVOID)pData->argumentRegisters.x[0]; } } return nullptr; } LPVOID ProfileArgIterator::GetReturnBufferAddr(void) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; PROFILE_PLATFORM_SPECIFIC_DATA* pData = reinterpret_cast<PROFILE_PLATFORM_SPECIFIC_DATA*>(m_handle); if ((pData->flags & PROFILE_TAILCALL) != 0) { _ASSERTE(!"GetReturnBufferAddr() - return buffer address is not available in tailcall probe"); return nullptr; } if (m_argIterator.HasRetBuffArg()) { if ((pData->flags & PROFILE_ENTER) != 0) { return (LPVOID)pData->x8; } else { // On ARM64 there is no requirement for the method to preserve the value stored in x8. // In order to workaround this JIT will explicitly return the return buffer address in x0. _ASSERTE((pData->flags & PROFILE_LEAVE) != 0); return (LPVOID)pData->argumentRegisters.x[0]; } } UINT fpReturnSize = m_argIterator.GetFPReturnSize(); if (fpReturnSize != 0) { TypeHandle thReturnValueType; m_argIterator.GetSig()->GetReturnTypeNormalized(&thReturnValueType); if (!thReturnValueType.IsNull() && thReturnValueType.IsHFA()) { CorInfoHFAElemType hfaElemType = thReturnValueType.GetHFAType(); if (hfaElemType == CORINFO_HFA_ELEM_VECTOR128) { return &pData->floatArgumentRegisters.q[0]; } else { int hfaFieldSize = 8; if (hfaElemType == CORINFO_HFA_ELEM_FLOAT) { hfaFieldSize = 4; } #ifdef _DEBUG else { _ASSERTE((hfaElemType == CORINFO_HFA_ELEM_DOUBLE) || (hfaElemType == CORINFO_HFA_ELEM_VECTOR64)); } #endif const int cntFPRegs = thReturnValueType.GetSize() / hfaFieldSize; // On Arm64 HFA and HVA values are returned in s0-s3, d0-d3, or v0-v3. return CopyStructFromFPRegs(0, cntFPRegs, hfaFieldSize); } } return &pData->floatArgumentRegisters.q[0]; } if (!m_argIterator.GetSig()->IsReturnTypeVoid()) { return &pData->argumentRegisters.x[0]; } return nullptr; } #undef PROFILE_ENTER #undef PROFILE_LEAVE #undef PROFILE_TAILCALL #endif // PROFILING_SUPPORTED
-1
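The GetReturnBufferAddr() logic in the record above sizes an HFA return value by its element kind and then copies that many floating-point registers into a contiguous buffer. Below is a tiny stand-alone illustration of that bookkeeping; the enum, struct, and function names are invented for the example and are not runtime types.

```cpp
#include <cassert>
#include <cstdio>

enum class HfaElem { Float, Double, Vector64, Vector128 };

struct Float3 { float x, y, z; };   // a homogeneous float aggregate (HFA) on ARM64

int HfaFieldSize(HfaElem elem)
{
    switch (elem) {
        case HfaElem::Float:    return 4;   // fields live in s registers
        case HfaElem::Double:
        case HfaElem::Vector64: return 8;   // fields live in d registers
        default:                return 16;  // Vector128: whole q/v registers
    }
}

int main()
{
    const int fieldSize = HfaFieldSize(HfaElem::Float);
    const int cntFPRegs = static_cast<int>(sizeof(Float3)) / fieldSize;
    assert(cntFPRegs == 3);   // such a value comes back in s0, s1 and s2
    std::printf("HFA of %d floats occupies %d FP return registers\n",
                cntFPRegs, cntFPRegs);
    return 0;
}
```

Per the ARM64 ABI, a homogeneous aggregate of up to four identical floating-point fields is returned in s0-s3, d0-d3, or v0-v3, which is why the profiler code gathers those registers into one buffer before handing the value back.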
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
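The placement policy described in this PR text can be pictured with a small stand-alone sketch. This is not the RyuJIT implementation: the type names, the threshold value, the placeability flag, and in particular the direction of the adaptive cutoff (few backedges -> sources) are assumptions made only to illustrate the idea of falling back to targets when a source cannot be used.

```cpp
#include <cstddef>
#include <vector>

enum class PatchpointStrategy { BackedgeTargets, BackedgeSources, Adaptive };

struct Backedge {
    int  sourceBlock;        // block ending with the backward branch
    int  targetBlock;        // loop head the branch jumps to
    bool sourceIsPlaceable;  // e.g. false if the source sits somewhere we cannot instrument
};

// With the adaptive strategy, prefer sources while the backedge count is small,
// otherwise use targets; any backedge whose source is not placeable falls back
// to its target either way.
std::vector<int> ChoosePatchpointBlocks(const std::vector<Backedge>& backedges,
                                        PatchpointStrategy strategy,
                                        std::size_t backedgeThreshold = 4)
{
    const bool preferSources =
        strategy == PatchpointStrategy::BackedgeSources ||
        (strategy == PatchpointStrategy::Adaptive &&
         backedges.size() <= backedgeThreshold);

    std::vector<int> blocksToPatch;
    for (const Backedge& e : backedges) {
        if (preferSources && e.sourceIsPlaceable)
            blocksToPatch.push_back(e.sourceBlock);
        else
            blocksToPatch.push_back(e.targetBlock);
    }
    return blocksToPatch;
}
```

A caller would pass the method's backedge list plus the configured strategy; the real heuristic and its default threshold live in the JIT and are not reproduced here.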
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/pal/inc/rt/cpp/fcntl.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // =========================================================================== // File: fcntl.h // // =========================================================================== // dummy fcntl.h for PAL #include "palrt.h"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // // =========================================================================== // File: fcntl.h // // =========================================================================== // dummy fcntl.h for PAL #include "palrt.h"
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/mono/mono/metadata/cominterop.h
/** * \file * COM Interop Support * * * (C) 2002 Ximian, Inc. http://www.ximian.com * */ #ifndef __MONO_COMINTEROP_H__ #define __MONO_COMINTEROP_H__ #include <mono/metadata/method-builder.h> #include <mono/metadata/method-builder-ilgen.h> #include <mono/metadata/marshal.h> void mono_cominterop_init (void); void mono_mb_emit_cominterop_get_function_pointer (MonoMethodBuilder *mb, MonoMethod* method); void mono_mb_emit_cominterop_call_function_pointer (MonoMethodBuilder *mb, MonoMethodSignature *sig); void mono_mb_emit_cominterop_call (MonoMethodBuilder *mb, MonoMethodSignature *sig, MonoMethod* method); void mono_cominterop_emit_ptr_to_object_conv (MonoMethodBuilder *mb, MonoType *type, MonoMarshalConv conv, MonoMarshalSpec *mspec); void mono_cominterop_emit_object_to_ptr_conv (MonoMethodBuilder *mb, MonoType *type, MonoMarshalConv conv, MonoMarshalSpec *mspec); MonoMethod * mono_cominterop_get_native_wrapper (MonoMethod *method); MonoMethod * mono_cominterop_get_invoke (MonoMethod *method); int mono_cominterop_emit_marshal_com_interface (EmitMarshalContext *m, int argnum, MonoType *t, MonoMarshalSpec *spec, int conv_arg, MonoType **conv_arg_type, MarshalAction action); int mono_cominterop_emit_marshal_safearray (EmitMarshalContext *m, int argnum, MonoType *t, MonoMarshalSpec *spec, int conv_arg, MonoType **conv_arg_type, MarshalAction action); MONO_API MONO_RT_EXTERNAL_ONLY MonoString * mono_string_from_bstr (/*mono_bstr*/gpointer bstr); MonoStringHandle mono_string_from_bstr_checked (mono_bstr_const bstr, MonoError *error); MONO_API void mono_free_bstr (/*mono_bstr_const*/gpointer bstr); MonoClass* mono_class_try_get_com_object_class (void); void* mono_cominterop_get_com_interface (MonoObject* object, MonoClass* ic, MonoError *error); gboolean mono_cominterop_is_interface (MonoClass* klass); gboolean mono_cominterop_method_com_visible (MonoMethod *method); #endif /* __MONO_COMINTEROP_H__ */
/** * \file * COM Interop Support * * * (C) 2002 Ximian, Inc. http://www.ximian.com * */ #ifndef __MONO_COMINTEROP_H__ #define __MONO_COMINTEROP_H__ #include <mono/metadata/method-builder.h> #include <mono/metadata/method-builder-ilgen.h> #include <mono/metadata/marshal.h> void mono_cominterop_init (void); void mono_mb_emit_cominterop_get_function_pointer (MonoMethodBuilder *mb, MonoMethod* method); void mono_mb_emit_cominterop_call_function_pointer (MonoMethodBuilder *mb, MonoMethodSignature *sig); void mono_mb_emit_cominterop_call (MonoMethodBuilder *mb, MonoMethodSignature *sig, MonoMethod* method); void mono_cominterop_emit_ptr_to_object_conv (MonoMethodBuilder *mb, MonoType *type, MonoMarshalConv conv, MonoMarshalSpec *mspec); void mono_cominterop_emit_object_to_ptr_conv (MonoMethodBuilder *mb, MonoType *type, MonoMarshalConv conv, MonoMarshalSpec *mspec); MonoMethod * mono_cominterop_get_native_wrapper (MonoMethod *method); MonoMethod * mono_cominterop_get_invoke (MonoMethod *method); int mono_cominterop_emit_marshal_com_interface (EmitMarshalContext *m, int argnum, MonoType *t, MonoMarshalSpec *spec, int conv_arg, MonoType **conv_arg_type, MarshalAction action); int mono_cominterop_emit_marshal_safearray (EmitMarshalContext *m, int argnum, MonoType *t, MonoMarshalSpec *spec, int conv_arg, MonoType **conv_arg_type, MarshalAction action); MONO_API MONO_RT_EXTERNAL_ONLY MonoString * mono_string_from_bstr (/*mono_bstr*/gpointer bstr); MonoStringHandle mono_string_from_bstr_checked (mono_bstr_const bstr, MonoError *error); MONO_API void mono_free_bstr (/*mono_bstr_const*/gpointer bstr); MonoClass* mono_class_try_get_com_object_class (void); void* mono_cominterop_get_com_interface (MonoObject* object, MonoClass* ic, MonoError *error); gboolean mono_cominterop_is_interface (MonoClass* klass); gboolean mono_cominterop_method_com_visible (MonoMethod *method); #endif /* __MONO_COMINTEROP_H__ */
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/native/public/mono/metadata/sgen-bridge.h
/** * \file * Copyright 2011 Novell, Inc. * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ /* * The bridge is a mechanism for SGen to let clients override the death of some * unreachable objects. We use it in monodroid to do garbage collection across * the Mono and Java heaps. * * The client (Monodroid) can designate some objects as "bridged", which means * that they participate in the bridge processing step once SGen considers them * unreachable, i.e., dead. Bridged objects must be registered for * finalization. * * When SGen is done marking, it puts together a list of all dead bridged * objects. This is passed to the bridge processor, which does an analysis to * simplify the graph: It replaces strongly-connected components with single * nodes, and may remove nodes corresponding to components which do not contain * bridged objects. * * The output of the SCC analysis is passed to the client's `cross_references()` * callback. This consists of 2 arrays, an array of SCCs (MonoGCBridgeSCC), * and an array of "xrefs" (edges between SCCs, MonoGCBridgeXRef). Edges are * encoded as pairs of "API indices", ie indexes in the SCC array. The client * is expected to set the `is_alive` flag on those strongly connected components * that it wishes to be kept alive. * * In monodroid each bridged object has a corresponding Java mirror object. In * the bridge callback it reifies the Mono object graph in the Java heap so that * the full, combined object graph is now instantiated on the Java side. Then * it triggers a Java GC, waits for it to finish, and checks which of the Java * mirror objects are still alive. For those it sets the `is_alive` flag and * returns from the callback. * * The SCC analysis is done while the world is stopped, but the callback is made * with the world running again. Weak links to bridged objects and other * objects reachable from them are kept until the callback returns, at which * point all links to bridged objects that don't have `is_alive` set are nulled. * Note that weak links to non-bridged objects reachable from bridged objects * are not nulled. This might be considered a bug. * * There are three different implementations of the bridge processor, each of * which implements 8 callbacks (see SgenBridgeProcessor). The implementations * differ in the algorithm they use to compute the "simplified" SCC graph. */ #ifndef _MONO_SGEN_BRIDGE_H_ #define _MONO_SGEN_BRIDGE_H_ #include <mono/utils/mono-publib.h> #include <mono/metadata/details/sgen-bridge-types.h> MONO_BEGIN_DECLS #define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args; #include <mono/metadata/details/sgen-bridge-functions.h> #undef MONO_API_FUNCTION MONO_END_DECLS #endif
/** * \file * Copyright 2011 Novell, Inc. * * Licensed under the MIT license. See LICENSE file in the project root for full license information. */ /* * The bridge is a mechanism for SGen to let clients override the death of some * unreachable objects. We use it in monodroid to do garbage collection across * the Mono and Java heaps. * * The client (Monodroid) can designate some objects as "bridged", which means * that they participate in the bridge processing step once SGen considers them * unreachable, i.e., dead. Bridged objects must be registered for * finalization. * * When SGen is done marking, it puts together a list of all dead bridged * objects. This is passed to the bridge processor, which does an analysis to * simplify the graph: It replaces strongly-connected components with single * nodes, and may remove nodes corresponding to components which do not contain * bridged objects. * * The output of the SCC analysis is passed to the client's `cross_references()` * callback. This consists of 2 arrays, an array of SCCs (MonoGCBridgeSCC), * and an array of "xrefs" (edges between SCCs, MonoGCBridgeXRef). Edges are * encoded as pairs of "API indices", ie indexes in the SCC array. The client * is expected to set the `is_alive` flag on those strongly connected components * that it wishes to be kept alive. * * In monodroid each bridged object has a corresponding Java mirror object. In * the bridge callback it reifies the Mono object graph in the Java heap so that * the full, combined object graph is now instantiated on the Java side. Then * it triggers a Java GC, waits for it to finish, and checks which of the Java * mirror objects are still alive. For those it sets the `is_alive` flag and * returns from the callback. * * The SCC analysis is done while the world is stopped, but the callback is made * with the world running again. Weak links to bridged objects and other * objects reachable from them are kept until the callback returns, at which * point all links to bridged objects that don't have `is_alive` set are nulled. * Note that weak links to non-bridged objects reachable from bridged objects * are not nulled. This might be considered a bug. * * There are three different implementations of the bridge processor, each of * which implements 8 callbacks (see SgenBridgeProcessor). The implementations * differ in the algorithm they use to compute the "simplified" SCC graph. */ #ifndef _MONO_SGEN_BRIDGE_H_ #define _MONO_SGEN_BRIDGE_H_ #include <mono/utils/mono-publib.h> #include <mono/metadata/details/sgen-bridge-types.h> MONO_BEGIN_DECLS #define MONO_API_FUNCTION(ret,name,args) MONO_API ret name args; #include <mono/metadata/details/sgen-bridge-functions.h> #undef MONO_API_FUNCTION MONO_END_DECLS #endif
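To make the contract described in the header comment above concrete, here is a hedged sketch of what a bridge client's cross_references() step could look like. The struct layouts and the liveness-propagation policy are simplified stand-ins chosen for the example; they follow the header's description (an is_alive flag per SCC, xrefs given as pairs of SCC indices) rather than the verbatim Mono API.

```cpp
#include <cstddef>
#include <vector>

struct BridgeSCC {
    bool is_alive = false;      // set by the client to keep this component alive
    std::vector<void*> objs;    // bridged objects in this component
};

struct BridgeXRef {
    int src_scc_index;          // edge between SCCs, encoded as indices
    int dst_scc_index;
};

// externallyAlive: which SCCs the client's other heap (e.g. the Java mirrors in
// monodroid) still reaches. This hypothetical client marks those SCCs alive and
// then, as one possible policy, propagates liveness along the xref edges.
void CrossReferences(std::vector<BridgeSCC>& sccs,
                     const std::vector<BridgeXRef>& xrefs,
                     const std::vector<bool>& externallyAlive)
{
    for (std::size_t i = 0; i < sccs.size(); ++i)
        sccs[i].is_alive = externallyAlive[i];

    // Fixed-point propagation: an alive SCC keeps the SCCs it references alive.
    bool changed = true;
    while (changed) {
        changed = false;
        for (const BridgeXRef& e : xrefs) {
            if (sccs[e.src_scc_index].is_alive && !sccs[e.dst_scc_index].is_alive) {
                sccs[e.dst_scc_index].is_alive = true;
                changed = true;
            }
        }
    }
}
```

A client like monodroid instead reifies the graph on the Java side and lets the Java GC decide liveness, but the output is the same: is_alive set on every SCC that must survive.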
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/vm/synch.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // #include "common.h" #include "corhost.h" #include "synch.h" void CLREventBase::CreateAutoEvent (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { THROWS; GC_NOTRIGGER; // disallow creation of Crst before EE starts // Can not assert here. ASP.NET uses our Threadpool before EE is started. PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); PRECONDITION((!IsOSEvent())); } CONTRACTL_END; SetAutoEvent(); { HANDLE h = WszCreateEvent(NULL,FALSE,bInitialState,NULL); if (h == NULL) { ThrowOutOfMemory(); } m_handle = h; } } BOOL CLREventBase::CreateAutoEventNoThrow (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { NOTHROW; GC_NOTRIGGER; // disallow creation of Crst before EE starts // Can not assert here. ASP.NET uses our Threadpool before EE is started. PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); PRECONDITION((!IsOSEvent())); } CONTRACTL_END; EX_TRY { CreateAutoEvent(bInitialState); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); return IsValid(); } void CLREventBase::CreateManualEvent (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { THROWS; GC_NOTRIGGER; // disallow creation of Crst before EE starts // Can not assert here. ASP.NET uses our Threadpool before EE is started. PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); PRECONDITION((!IsOSEvent())); } CONTRACTL_END; { HANDLE h = WszCreateEvent(NULL,TRUE,bInitialState,NULL); if (h == NULL) { ThrowOutOfMemory(); } m_handle = h; } } BOOL CLREventBase::CreateManualEventNoThrow (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { NOTHROW; GC_NOTRIGGER; // disallow creation of Crst before EE starts // Can not assert here. ASP.NET uses our Threadpool before EE is started. PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); PRECONDITION((!IsOSEvent())); } CONTRACTL_END; EX_TRY { CreateManualEvent(bInitialState); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); return IsValid(); } void CLREventBase::CreateMonitorEvent(SIZE_T Cookie) { CONTRACTL { THROWS; GC_NOTRIGGER; // disallow creation of Crst before EE starts PRECONDITION((g_fEEStarted)); PRECONDITION((GetThreadNULLOk() != NULL)); PRECONDITION((!IsOSEvent())); } CONTRACTL_END; // thread-safe SetAutoEvent FastInterlockOr(&m_dwFlags, CLREVENT_FLAGS_AUTO_EVENT); { HANDLE h = WszCreateEvent(NULL,FALSE,FALSE,NULL); if (h == NULL) { ThrowOutOfMemory(); } if (FastInterlockCompareExchangePointer(&m_handle, h, INVALID_HANDLE_VALUE) != INVALID_HANDLE_VALUE) { // We lost the race CloseHandle(h); } } // thread-safe SetInDeadlockDetection FastInterlockOr(&m_dwFlags, CLREVENT_FLAGS_IN_DEADLOCK_DETECTION); for (;;) { LONG oldFlags = m_dwFlags; if (oldFlags & CLREVENT_FLAGS_MONITOREVENT_ALLOCATED) { // Other thread has set the flag already. Nothing left for us to do. break; } LONG newFlags = oldFlags | CLREVENT_FLAGS_MONITOREVENT_ALLOCATED; if (FastInterlockCompareExchange((LONG*)&m_dwFlags, newFlags, oldFlags) != oldFlags) { // We lost the race continue; } // Because we set the allocated bit, we are the ones to do the signalling if (oldFlags & CLREVENT_FLAGS_MONITOREVENT_SIGNALLED) { // We got the honour to signal the event Set(); } break; } } void CLREventBase::SetMonitorEvent() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // SetMonitorEvent is robust against initialization races. 
It is possible to // call CLREvent::SetMonitorEvent on event that has not been initialialized yet by CreateMonitorEvent. // CreateMonitorEvent will signal the event once it is created if it happens. for (;;) { LONG oldFlags = m_dwFlags; if (oldFlags & CLREVENT_FLAGS_MONITOREVENT_ALLOCATED) { // Event has been allocated already. Use the regular codepath. Set(); break; } LONG newFlags = oldFlags | CLREVENT_FLAGS_MONITOREVENT_SIGNALLED; if (FastInterlockCompareExchange((LONG*)&m_dwFlags, newFlags, oldFlags) != oldFlags) { // We lost the race continue; } break; } } void CLREventBase::CreateOSAutoEvent (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { THROWS; GC_NOTRIGGER; // disallow creation of Crst before EE starts PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); } CONTRACTL_END; // Can not assert here. ASP.NET uses our Threadpool before EE is started. //_ASSERTE (g_fEEStarted); SetOSEvent(); SetAutoEvent(); HANDLE h = WszCreateEvent(NULL,FALSE,bInitialState,NULL); if (h == NULL) { ThrowOutOfMemory(); } m_handle = h; } BOOL CLREventBase::CreateOSAutoEventNoThrow (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { NOTHROW; GC_NOTRIGGER; // disallow creation of Crst before EE starts PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); } CONTRACTL_END; EX_TRY { CreateOSAutoEvent(bInitialState); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); return IsValid(); } void CLREventBase::CreateOSManualEvent (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { THROWS; GC_NOTRIGGER; // disallow creation of Crst before EE starts PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); } CONTRACTL_END; // Can not assert here. ASP.NET uses our Threadpool before EE is started. //_ASSERTE (g_fEEStarted); SetOSEvent(); HANDLE h = WszCreateEvent(NULL,TRUE,bInitialState,NULL); if (h == NULL) { ThrowOutOfMemory(); } m_handle = h; } BOOL CLREventBase::CreateOSManualEventNoThrow (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { NOTHROW; GC_NOTRIGGER; // disallow creation of Crst before EE starts PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); } CONTRACTL_END; EX_TRY { CreateOSManualEvent(bInitialState); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); return IsValid(); } void CLREventBase::CloseEvent() { CONTRACTL { NOTHROW; if (IsInDeadlockDetection()) {GC_TRIGGERS;} else {GC_NOTRIGGER;} } CONTRACTL_END; GCX_MAYBE_PREEMP(IsInDeadlockDetection() && IsValid()); _ASSERTE(Thread::Debug_AllowCallout()); if (m_handle != INVALID_HANDLE_VALUE) { { CloseHandle(m_handle); } m_handle = INVALID_HANDLE_VALUE; } m_dwFlags = 0; } BOOL CLREventBase::Set() { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION((m_handle != INVALID_HANDLE_VALUE)); } CONTRACTL_END; _ASSERTE(Thread::Debug_AllowCallout()); { return SetEvent(m_handle); } } BOOL CLREventBase::Reset() { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION((m_handle != INVALID_HANDLE_VALUE)); } CONTRACTL_END; _ASSERTE(Thread::Debug_AllowCallout()); { return ResetEvent(m_handle); } } static DWORD CLREventWaitHelper2(HANDLE handle, DWORD dwMilliseconds, BOOL alertable) { STATIC_CONTRACT_THROWS; return WaitForSingleObjectEx(handle,dwMilliseconds,alertable); } static DWORD CLREventWaitHelper(HANDLE handle, DWORD dwMilliseconds, BOOL alertable) { STATIC_CONTRACT_NOTHROW; struct Param { HANDLE handle; DWORD dwMilliseconds; BOOL alertable; DWORD result; } param; param.handle = handle; param.dwMilliseconds = dwMilliseconds; param.alertable = alertable; param.result = WAIT_FAILED; // Can not use 
EX_TRY/CATCH. EX_CATCH toggles GC mode. This function is called // through RareDisablePreemptiveGC. EX_CATCH breaks profiler callback. PAL_TRY(Param *, pParam, &param) { // Need to move to another helper (cannot have SEH and C++ destructors // on automatic variables in one function) pParam->result = CLREventWaitHelper2(pParam->handle, pParam->dwMilliseconds, pParam->alertable); } PAL_EXCEPT (EXCEPTION_EXECUTE_HANDLER) { param.result = WAIT_FAILED; } PAL_ENDTRY; return param.result; } DWORD CLREventBase::Wait(DWORD dwMilliseconds, BOOL alertable, PendingSync *syncState) { WRAPPER_NO_CONTRACT; return WaitEx(dwMilliseconds, alertable?WaitMode_Alertable:WaitMode_None,syncState); } DWORD CLREventBase::WaitEx(DWORD dwMilliseconds, WaitMode mode, PendingSync *syncState) { BOOL alertable = (mode & WaitMode_Alertable)!=0; CONTRACTL { if (alertable) { THROWS; // Thread::DoAppropriateWait can throw } else { NOTHROW; } if (GetThreadNULLOk()) { if (alertable) GC_TRIGGERS; else GC_NOTRIGGER; } else { DISABLED(GC_TRIGGERS); } PRECONDITION(m_handle != INVALID_HANDLE_VALUE); // Handle has to be valid } CONTRACTL_END; _ASSERTE(Thread::Debug_AllowCallout()); Thread * pThread = GetThreadNULLOk(); #ifdef _DEBUG // If a CLREvent is OS event only, we can not wait for the event on a managed thread if (IsOSEvent()) _ASSERTE (pThread == NULL); #endif _ASSERTE((pThread != NULL) || !g_fEEStarted || dbgOnly_IsSpecialEEThread()); { if (pThread && alertable) { DWORD dwRet = WAIT_FAILED; dwRet = pThread->DoAppropriateWait(1, &m_handle, FALSE, dwMilliseconds, mode, syncState); return dwRet; } else { _ASSERTE (syncState == NULL); return CLREventWaitHelper(m_handle,dwMilliseconds,alertable); } } } void CLRSemaphore::Create (DWORD dwInitial, DWORD dwMax) { CONTRACTL { THROWS; GC_NOTRIGGER; PRECONDITION(m_handle == INVALID_HANDLE_VALUE); } CONTRACTL_END; { HANDLE h = WszCreateSemaphore(NULL,dwInitial,dwMax,NULL); if (h == NULL) { ThrowOutOfMemory(); } m_handle = h; } } void CLRSemaphore::Close() { LIMITED_METHOD_CONTRACT; if (m_handle != INVALID_HANDLE_VALUE) { CloseHandle(m_handle); m_handle = INVALID_HANDLE_VALUE; } } BOOL CLRSemaphore::Release(LONG lReleaseCount, LONG *lpPreviousCount) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(m_handle != INVALID_HANDLE_VALUE); } CONTRACTL_END; { return ::ReleaseSemaphore(m_handle, lReleaseCount, lpPreviousCount); } } DWORD CLRSemaphore::Wait(DWORD dwMilliseconds, BOOL alertable) { CONTRACTL { if (GetThreadNULLOk() && alertable) { THROWS; // Thread::DoAppropriateWait can throw } else { NOTHROW; } if (GetThreadNULLOk()) { if (alertable) GC_TRIGGERS; else GC_NOTRIGGER; } else { DISABLED(GC_TRIGGERS); } PRECONDITION(m_handle != INVALID_HANDLE_VALUE); // Invalid to have invalid handle } CONTRACTL_END; Thread *pThread = GetThreadNULLOk(); _ASSERTE (pThread || !g_fEEStarted || dbgOnly_IsSpecialEEThread()); { // TODO wwl: if alertable is FALSE, do we support a host to break a deadlock? // Currently we can not call through DoAppropriateWait because of CannotThrowComplusException. // We should re-consider this after our code is exception safe. 
if (pThread && alertable) { return pThread->DoAppropriateWait(1, &m_handle, FALSE, dwMilliseconds, alertable?WaitMode_Alertable:WaitMode_None, NULL); } else { DWORD result = WAIT_FAILED; EX_TRY { result = WaitForSingleObjectEx(m_handle,dwMilliseconds,alertable); } EX_CATCH { result = WAIT_FAILED; } EX_END_CATCH(SwallowAllExceptions); return result; } } } void CLRLifoSemaphore::Create(INT32 initialSignalCount, INT32 maximumSignalCount) { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; _ASSERTE(maximumSignalCount > 0); _ASSERTE(initialSignalCount <= maximumSignalCount); _ASSERTE(m_handle == nullptr); #ifdef TARGET_UNIX HANDLE h = WszCreateSemaphore(nullptr, 0, maximumSignalCount, nullptr); #else // !TARGET_UNIX HANDLE h = CreateIoCompletionPort(INVALID_HANDLE_VALUE, nullptr, 0, maximumSignalCount); #endif // TARGET_UNIX if (h == nullptr) { ThrowOutOfMemory(); } m_handle = h; m_counts.signalCount = initialSignalCount; INDEBUG(m_maximumSignalCount = maximumSignalCount); } void CLRLifoSemaphore::Close() { LIMITED_METHOD_CONTRACT; if (m_handle == nullptr) { return; } CloseHandle(m_handle); m_handle = nullptr; } bool CLRLifoSemaphore::WaitForSignal(DWORD timeoutMs) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; _ASSERTE(timeoutMs != 0); _ASSERTE(m_handle != nullptr); _ASSERTE(m_counts.VolatileLoadWithoutBarrier().waiterCount != (UINT16)0); while (true) { // Wait for a signal BOOL waitSuccessful; { #ifdef TARGET_UNIX // Do a prioritized wait to get LIFO waiter release order DWORD waitResult = PAL_WaitForSingleObjectPrioritized(m_handle, timeoutMs); _ASSERTE(waitResult == WAIT_OBJECT_0 || waitResult == WAIT_TIMEOUT); waitSuccessful = waitResult == WAIT_OBJECT_0; #else // !TARGET_UNIX // I/O completion ports release waiters in LIFO order, see // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198(v=vs.85).aspx DWORD numberOfBytes; ULONG_PTR completionKey; LPOVERLAPPED overlapped; waitSuccessful = GetQueuedCompletionStatus(m_handle, &numberOfBytes, &completionKey, &overlapped, timeoutMs); _ASSERTE(waitSuccessful || GetLastError() == WAIT_TIMEOUT); _ASSERTE(overlapped == nullptr); #endif // TARGET_UNIX } if (!waitSuccessful) { // Unregister the waiter. The wait subsystem used above guarantees that a thread that wakes due to a timeout does // not observe a signal to the object being waited upon. 
Counts toSubtract; ++toSubtract.waiterCount; Counts countsBeforeUpdate = m_counts.ExchangeAdd(-toSubtract); _ASSERTE(countsBeforeUpdate.waiterCount != (UINT16)0); return false; } // Unregister the waiter if this thread will not be waiting anymore, and try to acquire the semaphore Counts counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { _ASSERTE(counts.waiterCount != (UINT16)0); Counts newCounts = counts; if (counts.signalCount != 0) { --newCounts.signalCount; --newCounts.waiterCount; } // This waiter has woken up and this needs to be reflected in the count of waiters signaled to wake if (counts.countOfWaitersSignaledToWake != (UINT8)0) { --newCounts.countOfWaitersSignaledToWake; } Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { if (counts.signalCount != 0) { return true; } break; } counts = countsBeforeUpdate; } } } bool CLRLifoSemaphore::Wait(DWORD timeoutMs) { WRAPPER_NO_CONTRACT; _ASSERTE(m_handle != nullptr); // Acquire the semaphore or register as a waiter Counts counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { _ASSERTE(counts.signalCount <= m_maximumSignalCount); Counts newCounts = counts; if (counts.signalCount != 0) { --newCounts.signalCount; } else if (timeoutMs != 0) { ++newCounts.waiterCount; _ASSERTE(newCounts.waiterCount != (UINT16)0); // overflow check, this many waiters is currently not supported } Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { return counts.signalCount != 0 || (timeoutMs != 0 && WaitForSignal(timeoutMs)); } counts = countsBeforeUpdate; } } bool CLRLifoSemaphore::Wait(DWORD timeoutMs, UINT32 spinCount, UINT32 processorCount) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; _ASSERTE(m_handle != nullptr); if (timeoutMs == 0 || spinCount == 0) { return Wait(timeoutMs); } // Try to acquire the semaphore or register as a spinner Counts counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { Counts newCounts = counts; if (counts.signalCount != 0) { --newCounts.signalCount; } else { ++newCounts.spinnerCount; if (newCounts.spinnerCount == (UINT8)0) { // Maximum number of spinners reached, register as a waiter instead --newCounts.spinnerCount; ++newCounts.waiterCount; _ASSERTE(newCounts.waiterCount != (UINT16)0); // overflow check, this many waiters is currently not supported } } Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { if (counts.signalCount != 0) { return true; } if (newCounts.waiterCount != counts.waiterCount) { return WaitForSignal(timeoutMs); } break; } counts = countsBeforeUpdate; } #ifdef TARGET_ARM64 // For now, the spinning changes are disabled on ARM64. The spin loop below replicates how UnfairSemaphore used to spin. // Once more tuning is done on ARM64, it should be possible to come up with a spinning scheme that works well everywhere. 
int spinCountPerProcessor = spinCount; for (UINT32 i = 1; ; ++i) { // Wait ClrSleepEx(0, false); // Try to acquire the semaphore and unregister as a spinner counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { _ASSERTE(counts.spinnerCount != (UINT8)0); if (counts.signalCount == 0) { break; } Counts newCounts = counts; --newCounts.signalCount; --newCounts.spinnerCount; Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { return true; } counts = countsBeforeUpdate; } // Determine whether to spin further double spinnersPerProcessor = (double)counts.spinnerCount / processorCount; UINT32 spinLimit = (UINT32)(spinCountPerProcessor / spinnersPerProcessor + 0.5); if (i >= spinLimit) { break; } } #else // !TARGET_ARM64 const UINT32 Sleep0Threshold = 10; YieldProcessorNormalizationInfo normalizationInfo; #ifdef TARGET_UNIX // The PAL's wait subsystem is quite slow, spin more to compensate for the more expensive wait spinCount *= 2; #endif // TARGET_UNIX for (UINT32 i = 0; i < spinCount; ++i) { // Wait // // (i - Sleep0Threshold) % 2 != 0: The purpose of this check is to interleave Thread.Yield/Sleep(0) with // Thread.SpinWait. Otherwise, the following issues occur: // - When there are no threads to switch to, Yield and Sleep(0) become no-op and it turns the spin loop into a // busy-spin that may quickly reach the max spin count and cause the thread to enter a wait state. Completing the // spin loop too early can cause excessive context switcing from the wait. // - If there are multiple threads doing Yield and Sleep(0) (typically from the same spin loop due to contention), // they may switch between one another, delaying work that can make progress. if (i < Sleep0Threshold || (i - Sleep0Threshold) % 2 != 0) { YieldProcessorWithBackOffNormalized(normalizationInfo, i); } else { // Not doing SwitchToThread(), it does not seem to have any benefit over Sleep(0) ClrSleepEx(0, false); } // Try to acquire the semaphore and unregister as a spinner counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { _ASSERTE(counts.spinnerCount != (UINT8)0); if (counts.signalCount == 0) { break; } Counts newCounts = counts; --newCounts.signalCount; --newCounts.spinnerCount; Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { return true; } counts = countsBeforeUpdate; } } #endif // TARGET_ARM64 // Unregister as a spinner, and acquire the semaphore or register as a waiter counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { _ASSERTE(counts.spinnerCount != (UINT8)0); Counts newCounts = counts; --newCounts.spinnerCount; if (counts.signalCount != 0) { --newCounts.signalCount; } else { ++newCounts.waiterCount; _ASSERTE(newCounts.waiterCount != (UINT16)0); // overflow check, this many waiters is currently not supported } Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { return counts.signalCount != 0 || WaitForSignal(timeoutMs); } counts = countsBeforeUpdate; } } void CLRLifoSemaphore::Release(INT32 releaseCount) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; _ASSERTE(releaseCount > 0); _ASSERTE((UINT32)releaseCount <= m_maximumSignalCount); _ASSERTE(m_handle != INVALID_HANDLE_VALUE); INT32 countOfWaitersToWake; Counts counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { Counts newCounts = counts; // Increase the signal count. The addition doesn't overflow because of the limit on the max signal count in Create. 
newCounts.signalCount += releaseCount; _ASSERTE(newCounts.signalCount > counts.signalCount); // Determine how many waiters to wake, taking into account how many spinners and waiters there are and how many waiters // have previously been signaled to wake but have not yet woken countOfWaitersToWake = (INT32)min(newCounts.signalCount, (UINT32)newCounts.waiterCount + newCounts.spinnerCount) - newCounts.spinnerCount - newCounts.countOfWaitersSignaledToWake; if (countOfWaitersToWake > 0) { // Ideally, limiting to a maximum of releaseCount would not be necessary and could be an assert instead, but since // WaitForSignal() does not have enough information to tell whether a woken thread was signaled, and due to the cap // below, it's possible for countOfWaitersSignaledToWake to be less than the number of threads that have actually // been signaled to wake. if (countOfWaitersToWake > releaseCount) { countOfWaitersToWake = releaseCount; } // Cap countOfWaitersSignaledToWake to its max value. It's ok to ignore some woken threads in this count, it just // means some more threads will be woken next time. Typically, it won't reach the max anyway. newCounts.countOfWaitersSignaledToWake += (UINT8)min(countOfWaitersToWake, (INT32)UINT8_MAX); if (newCounts.countOfWaitersSignaledToWake <= counts.countOfWaitersSignaledToWake) { newCounts.countOfWaitersSignaledToWake = UINT8_MAX; } } Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { _ASSERTE((UINT32)releaseCount <= m_maximumSignalCount - counts.signalCount); if (countOfWaitersToWake <= 0) { return; } break; } counts = countsBeforeUpdate; } // Wake waiters #ifdef TARGET_UNIX BOOL released = ReleaseSemaphore(m_handle, countOfWaitersToWake, nullptr); _ASSERTE(released); #else // !TARGET_UNIX while (--countOfWaitersToWake >= 0) { while (!PostQueuedCompletionStatus(m_handle, 0, 0, nullptr)) { // Probably out of memory. It's not valid to stop and throw here, so try again after a delay. ClrSleepEx(1, false); } } #endif // TARGET_UNIX } void CLRMutex::Create(LPSECURITY_ATTRIBUTES lpMutexAttributes, BOOL bInitialOwner, LPCTSTR lpName) { CONTRACTL { THROWS; GC_NOTRIGGER; PRECONDITION(m_handle == INVALID_HANDLE_VALUE && m_handle != NULL); } CONTRACTL_END; m_handle = WszCreateMutex(lpMutexAttributes,bInitialOwner,lpName); if (m_handle == NULL) { ThrowOutOfMemory(); } } void CLRMutex::Close() { LIMITED_METHOD_CONTRACT; if (m_handle != INVALID_HANDLE_VALUE) { CloseHandle(m_handle); m_handle = INVALID_HANDLE_VALUE; } } BOOL CLRMutex::Release() { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(m_handle != INVALID_HANDLE_VALUE && m_handle != NULL); } CONTRACTL_END; BOOL fRet = ReleaseMutex(m_handle); if (fRet) { EE_LOCK_RELEASED(this); } return fRet; } DWORD CLRMutex::Wait(DWORD dwMilliseconds, BOOL bAlertable) { CONTRACTL { NOTHROW; GC_NOTRIGGER; CAN_TAKE_LOCK; PRECONDITION(m_handle != INVALID_HANDLE_VALUE && m_handle != NULL); } CONTRACTL_END; DWORD fRet = WaitForSingleObjectEx(m_handle, dwMilliseconds, bAlertable); if (fRet == WAIT_OBJECT_0) { EE_LOCK_TAKEN(this); } return fRet; }
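The CreateMonitorEvent/SetMonitorEvent pair in this file coordinates through two flag bits so that a signal arriving before the event handle exists is not lost. Below is a stand-alone sketch of that handoff using std::atomic; the names (kAllocated, kSignalled, RealSet, LazyEvent) are invented stand-ins for the runtime's interlocked helpers and SetEvent, not the actual CLR types.

```cpp
#include <atomic>
#include <cstdint>

constexpr std::uint32_t kAllocated = 0x1;   // stand-in for the MONITOREVENT_ALLOCATED bit
constexpr std::uint32_t kSignalled = 0x2;   // stand-in for the MONITOREVENT_SIGNALLED bit

struct LazyEvent {
    std::atomic<std::uint32_t> flags{0};
    std::atomic<bool> osEventSignalled{false};   // stands in for SetEvent(handle)

    void RealSet() { osEventSignalled.store(true, std::memory_order_release); }

    // CreateMonitorEvent side: runs once the OS event exists.
    void MarkAllocated() {
        std::uint32_t old = flags.fetch_or(kAllocated, std::memory_order_acq_rel);
        if ((old & kAllocated) == 0 && (old & kSignalled) != 0)
            RealSet();   // a signal arrived before allocation finished; deliver it now
    }

    // SetMonitorEvent side: safe to call even before allocation completes.
    void Set() {
        std::uint32_t old = flags.fetch_or(kSignalled, std::memory_order_acq_rel);
        if (old & kAllocated)
            RealSet();   // the event already exists, signal it directly
    }
};
```

Because both sides use an atomic OR on the same word, whichever runs second necessarily observes the other's bit, so either Set() signals directly or MarkAllocated() delivers the pending signal once allocation completes.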
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // // #include "common.h" #include "corhost.h" #include "synch.h" void CLREventBase::CreateAutoEvent (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { THROWS; GC_NOTRIGGER; // disallow creation of Crst before EE starts // Can not assert here. ASP.NET uses our Threadpool before EE is started. PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); PRECONDITION((!IsOSEvent())); } CONTRACTL_END; SetAutoEvent(); { HANDLE h = WszCreateEvent(NULL,FALSE,bInitialState,NULL); if (h == NULL) { ThrowOutOfMemory(); } m_handle = h; } } BOOL CLREventBase::CreateAutoEventNoThrow (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { NOTHROW; GC_NOTRIGGER; // disallow creation of Crst before EE starts // Can not assert here. ASP.NET uses our Threadpool before EE is started. PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); PRECONDITION((!IsOSEvent())); } CONTRACTL_END; EX_TRY { CreateAutoEvent(bInitialState); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); return IsValid(); } void CLREventBase::CreateManualEvent (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { THROWS; GC_NOTRIGGER; // disallow creation of Crst before EE starts // Can not assert here. ASP.NET uses our Threadpool before EE is started. PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); PRECONDITION((!IsOSEvent())); } CONTRACTL_END; { HANDLE h = WszCreateEvent(NULL,TRUE,bInitialState,NULL); if (h == NULL) { ThrowOutOfMemory(); } m_handle = h; } } BOOL CLREventBase::CreateManualEventNoThrow (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { NOTHROW; GC_NOTRIGGER; // disallow creation of Crst before EE starts // Can not assert here. ASP.NET uses our Threadpool before EE is started. PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); PRECONDITION((!IsOSEvent())); } CONTRACTL_END; EX_TRY { CreateManualEvent(bInitialState); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); return IsValid(); } void CLREventBase::CreateMonitorEvent(SIZE_T Cookie) { CONTRACTL { THROWS; GC_NOTRIGGER; // disallow creation of Crst before EE starts PRECONDITION((g_fEEStarted)); PRECONDITION((GetThreadNULLOk() != NULL)); PRECONDITION((!IsOSEvent())); } CONTRACTL_END; // thread-safe SetAutoEvent FastInterlockOr(&m_dwFlags, CLREVENT_FLAGS_AUTO_EVENT); { HANDLE h = WszCreateEvent(NULL,FALSE,FALSE,NULL); if (h == NULL) { ThrowOutOfMemory(); } if (FastInterlockCompareExchangePointer(&m_handle, h, INVALID_HANDLE_VALUE) != INVALID_HANDLE_VALUE) { // We lost the race CloseHandle(h); } } // thread-safe SetInDeadlockDetection FastInterlockOr(&m_dwFlags, CLREVENT_FLAGS_IN_DEADLOCK_DETECTION); for (;;) { LONG oldFlags = m_dwFlags; if (oldFlags & CLREVENT_FLAGS_MONITOREVENT_ALLOCATED) { // Other thread has set the flag already. Nothing left for us to do. break; } LONG newFlags = oldFlags | CLREVENT_FLAGS_MONITOREVENT_ALLOCATED; if (FastInterlockCompareExchange((LONG*)&m_dwFlags, newFlags, oldFlags) != oldFlags) { // We lost the race continue; } // Because we set the allocated bit, we are the ones to do the signalling if (oldFlags & CLREVENT_FLAGS_MONITOREVENT_SIGNALLED) { // We got the honour to signal the event Set(); } break; } } void CLREventBase::SetMonitorEvent() { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; // SetMonitorEvent is robust against initialization races. 
It is possible to // call CLREvent::SetMonitorEvent on event that has not been initialialized yet by CreateMonitorEvent. // CreateMonitorEvent will signal the event once it is created if it happens. for (;;) { LONG oldFlags = m_dwFlags; if (oldFlags & CLREVENT_FLAGS_MONITOREVENT_ALLOCATED) { // Event has been allocated already. Use the regular codepath. Set(); break; } LONG newFlags = oldFlags | CLREVENT_FLAGS_MONITOREVENT_SIGNALLED; if (FastInterlockCompareExchange((LONG*)&m_dwFlags, newFlags, oldFlags) != oldFlags) { // We lost the race continue; } break; } } void CLREventBase::CreateOSAutoEvent (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { THROWS; GC_NOTRIGGER; // disallow creation of Crst before EE starts PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); } CONTRACTL_END; // Can not assert here. ASP.NET uses our Threadpool before EE is started. //_ASSERTE (g_fEEStarted); SetOSEvent(); SetAutoEvent(); HANDLE h = WszCreateEvent(NULL,FALSE,bInitialState,NULL); if (h == NULL) { ThrowOutOfMemory(); } m_handle = h; } BOOL CLREventBase::CreateOSAutoEventNoThrow (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { NOTHROW; GC_NOTRIGGER; // disallow creation of Crst before EE starts PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); } CONTRACTL_END; EX_TRY { CreateOSAutoEvent(bInitialState); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); return IsValid(); } void CLREventBase::CreateOSManualEvent (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { THROWS; GC_NOTRIGGER; // disallow creation of Crst before EE starts PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); } CONTRACTL_END; // Can not assert here. ASP.NET uses our Threadpool before EE is started. //_ASSERTE (g_fEEStarted); SetOSEvent(); HANDLE h = WszCreateEvent(NULL,TRUE,bInitialState,NULL); if (h == NULL) { ThrowOutOfMemory(); } m_handle = h; } BOOL CLREventBase::CreateOSManualEventNoThrow (BOOL bInitialState // If TRUE, initial state is signalled ) { CONTRACTL { NOTHROW; GC_NOTRIGGER; // disallow creation of Crst before EE starts PRECONDITION((m_handle == INVALID_HANDLE_VALUE)); } CONTRACTL_END; EX_TRY { CreateOSManualEvent(bInitialState); } EX_CATCH { } EX_END_CATCH(SwallowAllExceptions); return IsValid(); } void CLREventBase::CloseEvent() { CONTRACTL { NOTHROW; if (IsInDeadlockDetection()) {GC_TRIGGERS;} else {GC_NOTRIGGER;} } CONTRACTL_END; GCX_MAYBE_PREEMP(IsInDeadlockDetection() && IsValid()); _ASSERTE(Thread::Debug_AllowCallout()); if (m_handle != INVALID_HANDLE_VALUE) { { CloseHandle(m_handle); } m_handle = INVALID_HANDLE_VALUE; } m_dwFlags = 0; } BOOL CLREventBase::Set() { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION((m_handle != INVALID_HANDLE_VALUE)); } CONTRACTL_END; _ASSERTE(Thread::Debug_AllowCallout()); { return SetEvent(m_handle); } } BOOL CLREventBase::Reset() { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION((m_handle != INVALID_HANDLE_VALUE)); } CONTRACTL_END; _ASSERTE(Thread::Debug_AllowCallout()); { return ResetEvent(m_handle); } } static DWORD CLREventWaitHelper2(HANDLE handle, DWORD dwMilliseconds, BOOL alertable) { STATIC_CONTRACT_THROWS; return WaitForSingleObjectEx(handle,dwMilliseconds,alertable); } static DWORD CLREventWaitHelper(HANDLE handle, DWORD dwMilliseconds, BOOL alertable) { STATIC_CONTRACT_NOTHROW; struct Param { HANDLE handle; DWORD dwMilliseconds; BOOL alertable; DWORD result; } param; param.handle = handle; param.dwMilliseconds = dwMilliseconds; param.alertable = alertable; param.result = WAIT_FAILED; // Can not use 
EX_TRY/CATCH. EX_CATCH toggles GC mode. This function is called // through RareDisablePreemptiveGC. EX_CATCH breaks profiler callback. PAL_TRY(Param *, pParam, &param) { // Need to move to another helper (cannot have SEH and C++ destructors // on automatic variables in one function) pParam->result = CLREventWaitHelper2(pParam->handle, pParam->dwMilliseconds, pParam->alertable); } PAL_EXCEPT (EXCEPTION_EXECUTE_HANDLER) { param.result = WAIT_FAILED; } PAL_ENDTRY; return param.result; } DWORD CLREventBase::Wait(DWORD dwMilliseconds, BOOL alertable, PendingSync *syncState) { WRAPPER_NO_CONTRACT; return WaitEx(dwMilliseconds, alertable?WaitMode_Alertable:WaitMode_None,syncState); } DWORD CLREventBase::WaitEx(DWORD dwMilliseconds, WaitMode mode, PendingSync *syncState) { BOOL alertable = (mode & WaitMode_Alertable)!=0; CONTRACTL { if (alertable) { THROWS; // Thread::DoAppropriateWait can throw } else { NOTHROW; } if (GetThreadNULLOk()) { if (alertable) GC_TRIGGERS; else GC_NOTRIGGER; } else { DISABLED(GC_TRIGGERS); } PRECONDITION(m_handle != INVALID_HANDLE_VALUE); // Handle has to be valid } CONTRACTL_END; _ASSERTE(Thread::Debug_AllowCallout()); Thread * pThread = GetThreadNULLOk(); #ifdef _DEBUG // If a CLREvent is OS event only, we can not wait for the event on a managed thread if (IsOSEvent()) _ASSERTE (pThread == NULL); #endif _ASSERTE((pThread != NULL) || !g_fEEStarted || dbgOnly_IsSpecialEEThread()); { if (pThread && alertable) { DWORD dwRet = WAIT_FAILED; dwRet = pThread->DoAppropriateWait(1, &m_handle, FALSE, dwMilliseconds, mode, syncState); return dwRet; } else { _ASSERTE (syncState == NULL); return CLREventWaitHelper(m_handle,dwMilliseconds,alertable); } } } void CLRSemaphore::Create (DWORD dwInitial, DWORD dwMax) { CONTRACTL { THROWS; GC_NOTRIGGER; PRECONDITION(m_handle == INVALID_HANDLE_VALUE); } CONTRACTL_END; { HANDLE h = WszCreateSemaphore(NULL,dwInitial,dwMax,NULL); if (h == NULL) { ThrowOutOfMemory(); } m_handle = h; } } void CLRSemaphore::Close() { LIMITED_METHOD_CONTRACT; if (m_handle != INVALID_HANDLE_VALUE) { CloseHandle(m_handle); m_handle = INVALID_HANDLE_VALUE; } } BOOL CLRSemaphore::Release(LONG lReleaseCount, LONG *lpPreviousCount) { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(m_handle != INVALID_HANDLE_VALUE); } CONTRACTL_END; { return ::ReleaseSemaphore(m_handle, lReleaseCount, lpPreviousCount); } } DWORD CLRSemaphore::Wait(DWORD dwMilliseconds, BOOL alertable) { CONTRACTL { if (GetThreadNULLOk() && alertable) { THROWS; // Thread::DoAppropriateWait can throw } else { NOTHROW; } if (GetThreadNULLOk()) { if (alertable) GC_TRIGGERS; else GC_NOTRIGGER; } else { DISABLED(GC_TRIGGERS); } PRECONDITION(m_handle != INVALID_HANDLE_VALUE); // Invalid to have invalid handle } CONTRACTL_END; Thread *pThread = GetThreadNULLOk(); _ASSERTE (pThread || !g_fEEStarted || dbgOnly_IsSpecialEEThread()); { // TODO wwl: if alertable is FALSE, do we support a host to break a deadlock? // Currently we can not call through DoAppropriateWait because of CannotThrowComplusException. // We should re-consider this after our code is exception safe. 
if (pThread && alertable) { return pThread->DoAppropriateWait(1, &m_handle, FALSE, dwMilliseconds, alertable?WaitMode_Alertable:WaitMode_None, NULL); } else { DWORD result = WAIT_FAILED; EX_TRY { result = WaitForSingleObjectEx(m_handle,dwMilliseconds,alertable); } EX_CATCH { result = WAIT_FAILED; } EX_END_CATCH(SwallowAllExceptions); return result; } } } void CLRLifoSemaphore::Create(INT32 initialSignalCount, INT32 maximumSignalCount) { CONTRACTL { THROWS; GC_NOTRIGGER; } CONTRACTL_END; _ASSERTE(maximumSignalCount > 0); _ASSERTE(initialSignalCount <= maximumSignalCount); _ASSERTE(m_handle == nullptr); #ifdef TARGET_UNIX HANDLE h = WszCreateSemaphore(nullptr, 0, maximumSignalCount, nullptr); #else // !TARGET_UNIX HANDLE h = CreateIoCompletionPort(INVALID_HANDLE_VALUE, nullptr, 0, maximumSignalCount); #endif // TARGET_UNIX if (h == nullptr) { ThrowOutOfMemory(); } m_handle = h; m_counts.signalCount = initialSignalCount; INDEBUG(m_maximumSignalCount = maximumSignalCount); } void CLRLifoSemaphore::Close() { LIMITED_METHOD_CONTRACT; if (m_handle == nullptr) { return; } CloseHandle(m_handle); m_handle = nullptr; } bool CLRLifoSemaphore::WaitForSignal(DWORD timeoutMs) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; _ASSERTE(timeoutMs != 0); _ASSERTE(m_handle != nullptr); _ASSERTE(m_counts.VolatileLoadWithoutBarrier().waiterCount != (UINT16)0); while (true) { // Wait for a signal BOOL waitSuccessful; { #ifdef TARGET_UNIX // Do a prioritized wait to get LIFO waiter release order DWORD waitResult = PAL_WaitForSingleObjectPrioritized(m_handle, timeoutMs); _ASSERTE(waitResult == WAIT_OBJECT_0 || waitResult == WAIT_TIMEOUT); waitSuccessful = waitResult == WAIT_OBJECT_0; #else // !TARGET_UNIX // I/O completion ports release waiters in LIFO order, see // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198(v=vs.85).aspx DWORD numberOfBytes; ULONG_PTR completionKey; LPOVERLAPPED overlapped; waitSuccessful = GetQueuedCompletionStatus(m_handle, &numberOfBytes, &completionKey, &overlapped, timeoutMs); _ASSERTE(waitSuccessful || GetLastError() == WAIT_TIMEOUT); _ASSERTE(overlapped == nullptr); #endif // TARGET_UNIX } if (!waitSuccessful) { // Unregister the waiter. The wait subsystem used above guarantees that a thread that wakes due to a timeout does // not observe a signal to the object being waited upon. 
Counts toSubtract; ++toSubtract.waiterCount; Counts countsBeforeUpdate = m_counts.ExchangeAdd(-toSubtract); _ASSERTE(countsBeforeUpdate.waiterCount != (UINT16)0); return false; } // Unregister the waiter if this thread will not be waiting anymore, and try to acquire the semaphore Counts counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { _ASSERTE(counts.waiterCount != (UINT16)0); Counts newCounts = counts; if (counts.signalCount != 0) { --newCounts.signalCount; --newCounts.waiterCount; } // This waiter has woken up and this needs to be reflected in the count of waiters signaled to wake if (counts.countOfWaitersSignaledToWake != (UINT8)0) { --newCounts.countOfWaitersSignaledToWake; } Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { if (counts.signalCount != 0) { return true; } break; } counts = countsBeforeUpdate; } } } bool CLRLifoSemaphore::Wait(DWORD timeoutMs) { WRAPPER_NO_CONTRACT; _ASSERTE(m_handle != nullptr); // Acquire the semaphore or register as a waiter Counts counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { _ASSERTE(counts.signalCount <= m_maximumSignalCount); Counts newCounts = counts; if (counts.signalCount != 0) { --newCounts.signalCount; } else if (timeoutMs != 0) { ++newCounts.waiterCount; _ASSERTE(newCounts.waiterCount != (UINT16)0); // overflow check, this many waiters is currently not supported } Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { return counts.signalCount != 0 || (timeoutMs != 0 && WaitForSignal(timeoutMs)); } counts = countsBeforeUpdate; } } bool CLRLifoSemaphore::Wait(DWORD timeoutMs, UINT32 spinCount, UINT32 processorCount) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; _ASSERTE(m_handle != nullptr); if (timeoutMs == 0 || spinCount == 0) { return Wait(timeoutMs); } // Try to acquire the semaphore or register as a spinner Counts counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { Counts newCounts = counts; if (counts.signalCount != 0) { --newCounts.signalCount; } else { ++newCounts.spinnerCount; if (newCounts.spinnerCount == (UINT8)0) { // Maximum number of spinners reached, register as a waiter instead --newCounts.spinnerCount; ++newCounts.waiterCount; _ASSERTE(newCounts.waiterCount != (UINT16)0); // overflow check, this many waiters is currently not supported } } Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { if (counts.signalCount != 0) { return true; } if (newCounts.waiterCount != counts.waiterCount) { return WaitForSignal(timeoutMs); } break; } counts = countsBeforeUpdate; } #ifdef TARGET_ARM64 // For now, the spinning changes are disabled on ARM64. The spin loop below replicates how UnfairSemaphore used to spin. // Once more tuning is done on ARM64, it should be possible to come up with a spinning scheme that works well everywhere. 
int spinCountPerProcessor = spinCount; for (UINT32 i = 1; ; ++i) { // Wait ClrSleepEx(0, false); // Try to acquire the semaphore and unregister as a spinner counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { _ASSERTE(counts.spinnerCount != (UINT8)0); if (counts.signalCount == 0) { break; } Counts newCounts = counts; --newCounts.signalCount; --newCounts.spinnerCount; Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { return true; } counts = countsBeforeUpdate; } // Determine whether to spin further double spinnersPerProcessor = (double)counts.spinnerCount / processorCount; UINT32 spinLimit = (UINT32)(spinCountPerProcessor / spinnersPerProcessor + 0.5); if (i >= spinLimit) { break; } } #else // !TARGET_ARM64 const UINT32 Sleep0Threshold = 10; YieldProcessorNormalizationInfo normalizationInfo; #ifdef TARGET_UNIX // The PAL's wait subsystem is quite slow, spin more to compensate for the more expensive wait spinCount *= 2; #endif // TARGET_UNIX for (UINT32 i = 0; i < spinCount; ++i) { // Wait // // (i - Sleep0Threshold) % 2 != 0: The purpose of this check is to interleave Thread.Yield/Sleep(0) with // Thread.SpinWait. Otherwise, the following issues occur: // - When there are no threads to switch to, Yield and Sleep(0) become no-op and it turns the spin loop into a // busy-spin that may quickly reach the max spin count and cause the thread to enter a wait state. Completing the // spin loop too early can cause excessive context switcing from the wait. // - If there are multiple threads doing Yield and Sleep(0) (typically from the same spin loop due to contention), // they may switch between one another, delaying work that can make progress. if (i < Sleep0Threshold || (i - Sleep0Threshold) % 2 != 0) { YieldProcessorWithBackOffNormalized(normalizationInfo, i); } else { // Not doing SwitchToThread(), it does not seem to have any benefit over Sleep(0) ClrSleepEx(0, false); } // Try to acquire the semaphore and unregister as a spinner counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { _ASSERTE(counts.spinnerCount != (UINT8)0); if (counts.signalCount == 0) { break; } Counts newCounts = counts; --newCounts.signalCount; --newCounts.spinnerCount; Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { return true; } counts = countsBeforeUpdate; } } #endif // TARGET_ARM64 // Unregister as a spinner, and acquire the semaphore or register as a waiter counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { _ASSERTE(counts.spinnerCount != (UINT8)0); Counts newCounts = counts; --newCounts.spinnerCount; if (counts.signalCount != 0) { --newCounts.signalCount; } else { ++newCounts.waiterCount; _ASSERTE(newCounts.waiterCount != (UINT16)0); // overflow check, this many waiters is currently not supported } Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { return counts.signalCount != 0 || WaitForSignal(timeoutMs); } counts = countsBeforeUpdate; } } void CLRLifoSemaphore::Release(INT32 releaseCount) { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; _ASSERTE(releaseCount > 0); _ASSERTE((UINT32)releaseCount <= m_maximumSignalCount); _ASSERTE(m_handle != INVALID_HANDLE_VALUE); INT32 countOfWaitersToWake; Counts counts = m_counts.VolatileLoadWithoutBarrier(); while (true) { Counts newCounts = counts; // Increase the signal count. The addition doesn't overflow because of the limit on the max signal count in Create. 
newCounts.signalCount += releaseCount; _ASSERTE(newCounts.signalCount > counts.signalCount); // Determine how many waiters to wake, taking into account how many spinners and waiters there are and how many waiters // have previously been signaled to wake but have not yet woken countOfWaitersToWake = (INT32)min(newCounts.signalCount, (UINT32)newCounts.waiterCount + newCounts.spinnerCount) - newCounts.spinnerCount - newCounts.countOfWaitersSignaledToWake; if (countOfWaitersToWake > 0) { // Ideally, limiting to a maximum of releaseCount would not be necessary and could be an assert instead, but since // WaitForSignal() does not have enough information to tell whether a woken thread was signaled, and due to the cap // below, it's possible for countOfWaitersSignaledToWake to be less than the number of threads that have actually // been signaled to wake. if (countOfWaitersToWake > releaseCount) { countOfWaitersToWake = releaseCount; } // Cap countOfWaitersSignaledToWake to its max value. It's ok to ignore some woken threads in this count, it just // means some more threads will be woken next time. Typically, it won't reach the max anyway. newCounts.countOfWaitersSignaledToWake += (UINT8)min(countOfWaitersToWake, (INT32)UINT8_MAX); if (newCounts.countOfWaitersSignaledToWake <= counts.countOfWaitersSignaledToWake) { newCounts.countOfWaitersSignaledToWake = UINT8_MAX; } } Counts countsBeforeUpdate = m_counts.CompareExchange(newCounts, counts); if (countsBeforeUpdate == counts) { _ASSERTE((UINT32)releaseCount <= m_maximumSignalCount - counts.signalCount); if (countOfWaitersToWake <= 0) { return; } break; } counts = countsBeforeUpdate; } // Wake waiters #ifdef TARGET_UNIX BOOL released = ReleaseSemaphore(m_handle, countOfWaitersToWake, nullptr); _ASSERTE(released); #else // !TARGET_UNIX while (--countOfWaitersToWake >= 0) { while (!PostQueuedCompletionStatus(m_handle, 0, 0, nullptr)) { // Probably out of memory. It's not valid to stop and throw here, so try again after a delay. ClrSleepEx(1, false); } } #endif // TARGET_UNIX } void CLRMutex::Create(LPSECURITY_ATTRIBUTES lpMutexAttributes, BOOL bInitialOwner, LPCTSTR lpName) { CONTRACTL { THROWS; GC_NOTRIGGER; PRECONDITION(m_handle == INVALID_HANDLE_VALUE && m_handle != NULL); } CONTRACTL_END; m_handle = WszCreateMutex(lpMutexAttributes,bInitialOwner,lpName); if (m_handle == NULL) { ThrowOutOfMemory(); } } void CLRMutex::Close() { LIMITED_METHOD_CONTRACT; if (m_handle != INVALID_HANDLE_VALUE) { CloseHandle(m_handle); m_handle = INVALID_HANDLE_VALUE; } } BOOL CLRMutex::Release() { CONTRACTL { NOTHROW; GC_NOTRIGGER; PRECONDITION(m_handle != INVALID_HANDLE_VALUE && m_handle != NULL); } CONTRACTL_END; BOOL fRet = ReleaseMutex(m_handle); if (fRet) { EE_LOCK_RELEASED(this); } return fRet; } DWORD CLRMutex::Wait(DWORD dwMilliseconds, BOOL bAlertable) { CONTRACTL { NOTHROW; GC_NOTRIGGER; CAN_TAKE_LOCK; PRECONDITION(m_handle != INVALID_HANDLE_VALUE && m_handle != NULL); } CONTRACTL_END; DWORD fRet = WaitForSingleObjectEx(m_handle, dwMilliseconds, bAlertable); if (fRet == WAIT_OBJECT_0) { EE_LOCK_TAKEN(this); } return fRet; }
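The semaphore code above updates its packed signal/waiter/spinner counts through a compare-exchange retry loop. The following standalone sketch shows that same pattern in isolation; std::atomic<uint32_t> and the TryAcquire name are stand-ins chosen here for readability, not the runtime's actual Counts/Interlocked types, and the bit-field packing is omitted.

#include <atomic>
#include <cstdint>
#include <cstdio>

static std::atomic<uint32_t> g_signalCount{0};

// Sketch of the compare-exchange retry loop used for the count updates above:
// read the current value, compute the desired value, and retry if another
// thread won the race.
static bool TryAcquire()
{
    uint32_t counts = g_signalCount.load(std::memory_order_relaxed);
    while (true)
    {
        if (counts == 0)
        {
            return false; // no signal available; the real code would register a waiter here
        }
        uint32_t newCounts = counts - 1;
        if (g_signalCount.compare_exchange_weak(counts, newCounts,
                                                std::memory_order_acquire,
                                                std::memory_order_relaxed))
        {
            return true; // our update was installed
        }
        // On failure, counts now holds the freshly observed value; loop and retry.
    }
}

int main()
{
    g_signalCount.store(1);
    std::printf("first acquire: %d, second acquire: %d\n", (int)TryAcquire(), (int)TryAcquire());
    return 0;
}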
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
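A purely illustrative sketch of the "adaptive" choice described above follows; the PlacementStrategy enum, the ResolveAdaptive helper, and the threshold value are all hypothetical and are not the JIT's actual heuristic, which lives in the flowgraph changes referenced by this PR.

#include <cstdio>

enum class PlacementStrategy { Targets, Sources, Adaptive };

// Hypothetical resolution of the adaptive strategy: with only a few backedges
// (e.g. simple C# 'for' loops), prefer source placement; with many, fall back
// to placing patchpoints at the backedge targets.
static PlacementStrategy ResolveAdaptive(int backEdgeCount)
{
    const int kThreshold = 4; // made-up cutoff, for illustration only
    return backEdgeCount <= kThreshold ? PlacementStrategy::Sources
                                       : PlacementStrategy::Targets;
}

int main()
{
    std::printf("2 backedges -> %s\n",
        ResolveAdaptive(2) == PlacementStrategy::Sources ? "sources" : "targets");
    std::printf("9 backedges -> %s\n",
        ResolveAdaptive(9) == PlacementStrategy::Sources ? "sources" : "targets");
    return 0;
}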
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
./src/coreclr/vm/nativelibrary.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _NATIVELIBRARY_H_ #define _NATIVELIBRARY_H_ #include <clrtypes.h> class NativeLibrary { public: static NATIVE_LIBRARY_HANDLE LoadLibraryFromPath(LPCWSTR libraryPath, BOOL throwOnError); static NATIVE_LIBRARY_HANDLE LoadLibraryByName(LPCWSTR name, Assembly *callingAssembly, BOOL hasDllImportSearchPathFlags, DWORD dllImportSearchPathFlags, BOOL throwOnError); static void FreeNativeLibrary(NATIVE_LIBRARY_HANDLE handle); static INT_PTR GetNativeLibraryExport(NATIVE_LIBRARY_HANDLE handle, LPCWSTR symbolName, BOOL throwOnError); static NATIVE_LIBRARY_HANDLE LoadLibraryFromMethodDesc(NDirectMethodDesc *pMD); }; #endif // _NATIVELIBRARY_H_
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #ifndef _NATIVELIBRARY_H_ #define _NATIVELIBRARY_H_ #include <clrtypes.h> class NativeLibrary { public: static NATIVE_LIBRARY_HANDLE LoadLibraryFromPath(LPCWSTR libraryPath, BOOL throwOnError); static NATIVE_LIBRARY_HANDLE LoadLibraryByName(LPCWSTR name, Assembly *callingAssembly, BOOL hasDllImportSearchPathFlags, DWORD dllImportSearchPathFlags, BOOL throwOnError); static void FreeNativeLibrary(NATIVE_LIBRARY_HANDLE handle); static INT_PTR GetNativeLibraryExport(NATIVE_LIBRARY_HANDLE handle, LPCWSTR symbolName, BOOL throwOnError); static NATIVE_LIBRARY_HANDLE LoadLibraryFromMethodDesc(NDirectMethodDesc *pMD); }; #endif // _NATIVELIBRARY_H_
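As a hedged illustration only -- this is not the CoreCLR implementation, which routes through the PAL and host-provided resolvers -- the Unix loader primitives that a load/resolve/free surface like the one declared above ultimately maps onto look like the sketch below. The libm.so.6 library and the cos symbol are arbitrary examples.

#include <cstdio>
#include <dlfcn.h>

int main()
{
    // dlopen/dlsym/dlclose as rough analogues of LoadLibraryFromPath,
    // GetNativeLibraryExport, and FreeNativeLibrary. Link with -ldl on glibc.
    void *handle = dlopen("libm.so.6", RTLD_LAZY);
    if (handle == nullptr)
    {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }
    void *sym = dlsym(handle, "cos");
    std::printf("cos resolved at %p\n", sym);
    dlclose(handle);
    return 0;
}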
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
./src/coreclr/utilcode/posterror.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // PostErrors.cpp // // This module contains the error handling/posting code for the engine. It // is assumed that all methods may be called by a dispatch client, and therefore // errors are always posted using IErrorInfo. // //***************************************************************************** #include "stdafx.h" // Standard header. #ifndef FEATURE_UTILCODE_NO_DEPENDENCIES #include <utilcode.h> // Utility helpers. #include <corerror.h> #include "../dlls/mscorrc/resource.h" #include "ex.h" #include <posterror.h> // Local prototypes. HRESULT FillErrorInfo(LPCWSTR szMsg, DWORD dwHelpContext); void GetResourceCultureCallbacks( FPGETTHREADUICULTURENAMES* fpGetThreadUICultureNames, FPGETTHREADUICULTUREID* fpGetThreadUICultureId) { WRAPPER_NO_CONTRACT; CCompRC::GetDefaultCallbacks( fpGetThreadUICultureNames, fpGetThreadUICultureId ); } //***************************************************************************** // Set callbacks to get culture info //***************************************************************************** void SetResourceCultureCallbacks( FPGETTHREADUICULTURENAMES fpGetThreadUICultureNames, FPGETTHREADUICULTUREID fpGetThreadUICultureId // TODO: Don't rely on the LCID, only the name ) { WRAPPER_NO_CONTRACT; CCompRC::SetDefaultCallbacks( fpGetThreadUICultureNames, fpGetThreadUICultureId ); } //***************************************************************************** // Public function to load a resource string //***************************************************************************** STDAPI UtilLoadStringRC( UINT iResourceID, _Out_writes_(iMax) LPWSTR szBuffer, int iMax, int bQuiet ) { WRAPPER_NO_CONTRACT; return UtilLoadResourceString(bQuiet? CCompRC::Optional : CCompRC::Required,iResourceID, szBuffer, iMax); } HRESULT UtilLoadResourceString(CCompRC::ResourceCategory eCategory, UINT iResourceID, _Out_writes_ (iMax) LPWSTR szBuffer, int iMax) { CONTRACTL { DISABLED(NOTHROW); GC_NOTRIGGER; } CONTRACTL_END; HRESULT retVal = E_OUTOFMEMORY; SString::Startup(); EX_TRY { CCompRC *pResourceDLL = CCompRC::GetDefaultResourceDll(); if (pResourceDLL != NULL) { retVal = pResourceDLL->LoadString(eCategory, iResourceID, szBuffer, iMax); } } EX_CATCH { // Catch any errors and return E_OUTOFMEMORY; retVal = E_OUTOFMEMORY; } EX_END_CATCH(SwallowAllExceptions); return retVal; } //***************************************************************************** // Format a Runtime Error message. //***************************************************************************** HRESULT __cdecl FormatRuntimeErrorVa( _Inout_updates_(cchMsg) WCHAR *rcMsg, // Buffer into which to format. ULONG cchMsg, // Size of buffer, characters. HRESULT hrRpt, // The HR to report. va_list marker) // Optional args. { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; WCHAR rcBuf[512]; // Resource string. HRESULT hr; // Ensure nul termination. *rcMsg = W('\0'); // If this is one of our errors or if it is simply a resource ID, then grab the error from the rc file. if ((HRESULT_FACILITY(hrRpt) == FACILITY_URT) || (HIWORD(hrRpt) == 0)) { hr = UtilLoadStringRC(LOWORD(hrRpt), rcBuf, ARRAY_SIZE(rcBuf), true); if (hr == S_OK) { _vsnwprintf_s(rcMsg, cchMsg, _TRUNCATE, rcBuf, marker); } } // Otherwise it isn't one of ours, so we need to see if the system can // find the text for it. 
else { if (WszFormatMessage(FORMAT_MESSAGE_FROM_SYSTEM, 0, hrRpt, 0, rcMsg, cchMsg, 0/*<TODO>@todo: marker</TODO>*/)) { hr = S_OK; // System messages contain a trailing \r\n, which we don't want normally. size_t iLen = wcslen(rcMsg); if (iLen > 3 && rcMsg[iLen - 2] == '\r' && rcMsg[iLen - 1] == '\n') rcMsg[iLen - 2] = '\0'; } else hr = HRESULT_FROM_GetLastError(); } // If we failed to find the message anywhere, then issue a hard coded message. if (FAILED(hr)) { _snwprintf_s(rcMsg, cchMsg, _TRUNCATE, W("Common Language Runtime Internal error: 0x%08x"), hrRpt); DEBUG_STMT(DbgWriteEx(rcMsg)); } return hrRpt; } // FormatRuntimeErrorVa //***************************************************************************** // Format a Runtime Error message, varargs. //***************************************************************************** HRESULT __cdecl FormatRuntimeError( _Out_writes_(cchMsg) WCHAR *rcMsg, // Buffer into which to format. ULONG cchMsg, // Size of buffer, characters. HRESULT hrRpt, // The HR to report. ...) // Optional args. { WRAPPER_NO_CONTRACT; va_list marker; // User text. va_start(marker, hrRpt); hrRpt = FormatRuntimeErrorVa(rcMsg, cchMsg, hrRpt, marker); va_end(marker); return hrRpt; } #ifdef FEATURE_COMINTEROP //***************************************************************************** // Create, fill out and set an error info object. Note that this does not fill // out the IID for the error object; that is done elsewhere. //***************************************************************************** HRESULT FillErrorInfo( // Return status. LPCWSTR szMsg, // Error message. DWORD dwHelpContext) // Help context. { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; ICreateErrorInfo *pICreateErr = NULL; // Error info creation Iface pointer. IErrorInfo *pIErrInfo = NULL; // The IErrorInfo interface. HRESULT hr; // Return status. // Get the ICreateErrorInfo pointer. hr = S_OK; EX_TRY { hr = CreateErrorInfo(&pICreateErr); } EX_CATCH { hr = GET_EXCEPTION()->GetHR(); } EX_END_CATCH(SwallowAllExceptions); if (FAILED(hr)) return (hr); // Set message text description. if (FAILED(hr = pICreateErr->SetDescription((LPWSTR) szMsg))) goto Exit1; // suppress PreFast warning about passing literal string to non-const API. // This API (ICreateErrorInfo::SetHelpFile) is documented to take a const argument, but // we can't put const in the signature because it would break existing implementors of // the API. #ifdef _PREFAST_ #pragma prefast(push) #pragma warning(disable:6298) #endif // Set the help file and help context. //<TODO>@todo: we don't have a help file yet.</TODO> if (FAILED(hr = pICreateErr->SetHelpFile(const_cast<WCHAR*>(W("complib.hlp")))) || FAILED(hr = pICreateErr->SetHelpContext(dwHelpContext))) goto Exit1; #ifdef _PREFAST_ #pragma prefast(pop) #endif // Get the IErrorInfo pointer. if (FAILED(hr = pICreateErr->QueryInterface(IID_IErrorInfo, (PVOID *) &pIErrInfo))) goto Exit1; // Save the error and release our local pointers. { // If we get here, we have loaded oleaut32.dll. CONTRACT_VIOLATION(ThrowsViolation); SetErrorInfo(0L, pIErrInfo); } Exit1: pICreateErr->Release(); if (pIErrInfo) { pIErrInfo->Release(); } return hr; } #endif // FEATURE_COMINTEROP //***************************************************************************** // This function will post an error for the client. If the LOWORD(hrRpt) can // be found as a valid error message, then it is loaded and formatted with // the arguments passed in. 
If it cannot be found, then the error is checked // against FormatMessage to see if it is a system error. System errors are // not formatted so no add'l parameters are required. If any errors in this // process occur, hrRpt is returned for the client with no error posted. //***************************************************************************** extern "C" HRESULT __cdecl PostErrorVA( // Returned error. HRESULT hrRpt, // Reported error. va_list marker) // Error arguments. { CONTRACTL { NOTHROW; GC_NOTRIGGER; ENTRY_POINT; } CONTRACTL_END; #ifdef FEATURE_COMINTEROP const DWORD cchMsg = 4096; WCHAR *rcMsg = (WCHAR*)alloca(cchMsg * sizeof(WCHAR)); // Error message. HRESULT hr; BEGIN_ENTRYPOINT_NOTHROW; // Return warnings without text. if (!FAILED(hrRpt)) goto ErrExit; // If we are already out of memory or out of stack or the thread is in some bad state, // we don't want throw gasoline on the fire by calling ErrorInfo stuff below (which can // trigger a delayload of oleaut32.dll). We don't need to embellish transient errors // so just return this without text. if (Exception::IsTransient(hrRpt)) { goto ErrExit; } // Format the error. FormatRuntimeErrorVa(rcMsg, cchMsg, hrRpt, marker); // Turn the error into a posted error message. If this fails, we still // return the original error message since a message caused by our error // handling system isn't going to give you a clue about the original error. hr = FillErrorInfo(rcMsg, LOWORD(hrRpt)); _ASSERTE(hr == S_OK); ErrExit: END_ENTRYPOINT_NOTHROW; #endif // FEATURE_COMINTEROP return (hrRpt); } // PostErrorVA #endif //!FEATURE_UTILCODE_NO_DEPENDENCIES //***************************************************************************** // This function will post an error for the client. If the LOWORD(hrRpt) can // be found as a valid error message, then it is loaded and formatted with // the arguments passed in. If it cannot be found, then the error is checked // against FormatMessage to see if it is a system error. System errors are // not formatted so no add'l parameters are required. If any errors in this // process occur, hrRpt is returned for the client with no error posted. //***************************************************************************** extern "C" HRESULT __cdecl PostError( HRESULT hrRpt, // Reported error. ...) // Error arguments. { #ifndef FEATURE_UTILCODE_NO_DEPENDENCIES WRAPPER_NO_CONTRACT; va_list marker; // User text. va_start(marker, hrRpt); hrRpt = PostErrorVA(hrRpt, marker); va_end(marker); #endif //!FEATURE_UTILCODE_NO_DEPENDENCIES return hrRpt; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //***************************************************************************** // PostErrors.cpp // // This module contains the error handling/posting code for the engine. It // is assumed that all methods may be called by a dispatch client, and therefore // errors are always posted using IErrorInfo. // //***************************************************************************** #include "stdafx.h" // Standard header. #ifndef FEATURE_UTILCODE_NO_DEPENDENCIES #include <utilcode.h> // Utility helpers. #include <corerror.h> #include "../dlls/mscorrc/resource.h" #include "ex.h" #include <posterror.h> // Local prototypes. HRESULT FillErrorInfo(LPCWSTR szMsg, DWORD dwHelpContext); void GetResourceCultureCallbacks( FPGETTHREADUICULTURENAMES* fpGetThreadUICultureNames, FPGETTHREADUICULTUREID* fpGetThreadUICultureId) { WRAPPER_NO_CONTRACT; CCompRC::GetDefaultCallbacks( fpGetThreadUICultureNames, fpGetThreadUICultureId ); } //***************************************************************************** // Set callbacks to get culture info //***************************************************************************** void SetResourceCultureCallbacks( FPGETTHREADUICULTURENAMES fpGetThreadUICultureNames, FPGETTHREADUICULTUREID fpGetThreadUICultureId // TODO: Don't rely on the LCID, only the name ) { WRAPPER_NO_CONTRACT; CCompRC::SetDefaultCallbacks( fpGetThreadUICultureNames, fpGetThreadUICultureId ); } //***************************************************************************** // Public function to load a resource string //***************************************************************************** STDAPI UtilLoadStringRC( UINT iResourceID, _Out_writes_(iMax) LPWSTR szBuffer, int iMax, int bQuiet ) { WRAPPER_NO_CONTRACT; return UtilLoadResourceString(bQuiet? CCompRC::Optional : CCompRC::Required,iResourceID, szBuffer, iMax); } HRESULT UtilLoadResourceString(CCompRC::ResourceCategory eCategory, UINT iResourceID, _Out_writes_ (iMax) LPWSTR szBuffer, int iMax) { CONTRACTL { DISABLED(NOTHROW); GC_NOTRIGGER; } CONTRACTL_END; HRESULT retVal = E_OUTOFMEMORY; SString::Startup(); EX_TRY { CCompRC *pResourceDLL = CCompRC::GetDefaultResourceDll(); if (pResourceDLL != NULL) { retVal = pResourceDLL->LoadString(eCategory, iResourceID, szBuffer, iMax); } } EX_CATCH { // Catch any errors and return E_OUTOFMEMORY; retVal = E_OUTOFMEMORY; } EX_END_CATCH(SwallowAllExceptions); return retVal; } //***************************************************************************** // Format a Runtime Error message. //***************************************************************************** HRESULT __cdecl FormatRuntimeErrorVa( _Inout_updates_(cchMsg) WCHAR *rcMsg, // Buffer into which to format. ULONG cchMsg, // Size of buffer, characters. HRESULT hrRpt, // The HR to report. va_list marker) // Optional args. { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; WCHAR rcBuf[512]; // Resource string. HRESULT hr; // Ensure nul termination. *rcMsg = W('\0'); // If this is one of our errors or if it is simply a resource ID, then grab the error from the rc file. if ((HRESULT_FACILITY(hrRpt) == FACILITY_URT) || (HIWORD(hrRpt) == 0)) { hr = UtilLoadStringRC(LOWORD(hrRpt), rcBuf, ARRAY_SIZE(rcBuf), true); if (hr == S_OK) { _vsnwprintf_s(rcMsg, cchMsg, _TRUNCATE, rcBuf, marker); } } // Otherwise it isn't one of ours, so we need to see if the system can // find the text for it. 
else { if (WszFormatMessage(FORMAT_MESSAGE_FROM_SYSTEM, 0, hrRpt, 0, rcMsg, cchMsg, 0/*<TODO>@todo: marker</TODO>*/)) { hr = S_OK; // System messages contain a trailing \r\n, which we don't want normally. size_t iLen = wcslen(rcMsg); if (iLen > 3 && rcMsg[iLen - 2] == '\r' && rcMsg[iLen - 1] == '\n') rcMsg[iLen - 2] = '\0'; } else hr = HRESULT_FROM_GetLastError(); } // If we failed to find the message anywhere, then issue a hard coded message. if (FAILED(hr)) { _snwprintf_s(rcMsg, cchMsg, _TRUNCATE, W("Common Language Runtime Internal error: 0x%08x"), hrRpt); DEBUG_STMT(DbgWriteEx(rcMsg)); } return hrRpt; } // FormatRuntimeErrorVa //***************************************************************************** // Format a Runtime Error message, varargs. //***************************************************************************** HRESULT __cdecl FormatRuntimeError( _Out_writes_(cchMsg) WCHAR *rcMsg, // Buffer into which to format. ULONG cchMsg, // Size of buffer, characters. HRESULT hrRpt, // The HR to report. ...) // Optional args. { WRAPPER_NO_CONTRACT; va_list marker; // User text. va_start(marker, hrRpt); hrRpt = FormatRuntimeErrorVa(rcMsg, cchMsg, hrRpt, marker); va_end(marker); return hrRpt; } #ifdef FEATURE_COMINTEROP //***************************************************************************** // Create, fill out and set an error info object. Note that this does not fill // out the IID for the error object; that is done elsewhere. //***************************************************************************** HRESULT FillErrorInfo( // Return status. LPCWSTR szMsg, // Error message. DWORD dwHelpContext) // Help context. { CONTRACTL { NOTHROW; GC_NOTRIGGER; } CONTRACTL_END; ICreateErrorInfo *pICreateErr = NULL; // Error info creation Iface pointer. IErrorInfo *pIErrInfo = NULL; // The IErrorInfo interface. HRESULT hr; // Return status. // Get the ICreateErrorInfo pointer. hr = S_OK; EX_TRY { hr = CreateErrorInfo(&pICreateErr); } EX_CATCH { hr = GET_EXCEPTION()->GetHR(); } EX_END_CATCH(SwallowAllExceptions); if (FAILED(hr)) return (hr); // Set message text description. if (FAILED(hr = pICreateErr->SetDescription((LPWSTR) szMsg))) goto Exit1; // suppress PreFast warning about passing literal string to non-const API. // This API (ICreateErrorInfo::SetHelpFile) is documented to take a const argument, but // we can't put const in the signature because it would break existing implementors of // the API. #ifdef _PREFAST_ #pragma prefast(push) #pragma warning(disable:6298) #endif // Set the help file and help context. //<TODO>@todo: we don't have a help file yet.</TODO> if (FAILED(hr = pICreateErr->SetHelpFile(const_cast<WCHAR*>(W("complib.hlp")))) || FAILED(hr = pICreateErr->SetHelpContext(dwHelpContext))) goto Exit1; #ifdef _PREFAST_ #pragma prefast(pop) #endif // Get the IErrorInfo pointer. if (FAILED(hr = pICreateErr->QueryInterface(IID_IErrorInfo, (PVOID *) &pIErrInfo))) goto Exit1; // Save the error and release our local pointers. { // If we get here, we have loaded oleaut32.dll. CONTRACT_VIOLATION(ThrowsViolation); SetErrorInfo(0L, pIErrInfo); } Exit1: pICreateErr->Release(); if (pIErrInfo) { pIErrInfo->Release(); } return hr; } #endif // FEATURE_COMINTEROP //***************************************************************************** // This function will post an error for the client. If the LOWORD(hrRpt) can // be found as a valid error message, then it is loaded and formatted with // the arguments passed in. 
If it cannot be found, then the error is checked // against FormatMessage to see if it is a system error. System errors are // not formatted so no add'l parameters are required. If any errors in this // process occur, hrRpt is returned for the client with no error posted. //***************************************************************************** extern "C" HRESULT __cdecl PostErrorVA( // Returned error. HRESULT hrRpt, // Reported error. va_list marker) // Error arguments. { CONTRACTL { NOTHROW; GC_NOTRIGGER; ENTRY_POINT; } CONTRACTL_END; #ifdef FEATURE_COMINTEROP const DWORD cchMsg = 4096; WCHAR *rcMsg = (WCHAR*)alloca(cchMsg * sizeof(WCHAR)); // Error message. HRESULT hr; BEGIN_ENTRYPOINT_NOTHROW; // Return warnings without text. if (!FAILED(hrRpt)) goto ErrExit; // If we are already out of memory or out of stack or the thread is in some bad state, // we don't want throw gasoline on the fire by calling ErrorInfo stuff below (which can // trigger a delayload of oleaut32.dll). We don't need to embellish transient errors // so just return this without text. if (Exception::IsTransient(hrRpt)) { goto ErrExit; } // Format the error. FormatRuntimeErrorVa(rcMsg, cchMsg, hrRpt, marker); // Turn the error into a posted error message. If this fails, we still // return the original error message since a message caused by our error // handling system isn't going to give you a clue about the original error. hr = FillErrorInfo(rcMsg, LOWORD(hrRpt)); _ASSERTE(hr == S_OK); ErrExit: END_ENTRYPOINT_NOTHROW; #endif // FEATURE_COMINTEROP return (hrRpt); } // PostErrorVA #endif //!FEATURE_UTILCODE_NO_DEPENDENCIES //***************************************************************************** // This function will post an error for the client. If the LOWORD(hrRpt) can // be found as a valid error message, then it is loaded and formatted with // the arguments passed in. If it cannot be found, then the error is checked // against FormatMessage to see if it is a system error. System errors are // not formatted so no add'l parameters are required. If any errors in this // process occur, hrRpt is returned for the client with no error posted. //***************************************************************************** extern "C" HRESULT __cdecl PostError( HRESULT hrRpt, // Reported error. ...) // Error arguments. { #ifndef FEATURE_UTILCODE_NO_DEPENDENCIES WRAPPER_NO_CONTRACT; va_list marker; // User text. va_start(marker, hrRpt); hrRpt = PostErrorVA(hrRpt, marker); va_end(marker); #endif //!FEATURE_UTILCODE_NO_DEPENDENCIES return hrRpt; }
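A minimal standalone sketch of the HRESULT decomposition that FormatRuntimeErrorVa relies on -- facility in bits 16..26, resource ID in the low word -- using plain bit operations in place of the Windows macros; the example value is COR_E_TYPELOAD (0x80131522), a FACILITY_URT error.

#include <cstdint>
#include <cstdio>

int main()
{
    uint32_t hr = 0x80131522u;               // COR_E_TYPELOAD, facility FACILITY_URT (0x13)
    uint32_t facility = (hr >> 16) & 0x1fff; // equivalent of HRESULT_FACILITY(hr)
    uint32_t code = hr & 0xffff;             // equivalent of LOWORD(hr), used as the resource ID
    std::printf("facility=0x%x code=0x%x\n", facility, code);
    return 0;
}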
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
./src/native/external/zlib/deflate.h
/* deflate.h -- internal compression state * Copyright (C) 1995-2016 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ /* @(#) $Id$ */ #ifndef DEFLATE_H #define DEFLATE_H #include "zutil.h" /* define NO_GZIP when compiling if you want to disable gzip header and trailer creation by deflate(). NO_GZIP would be used to avoid linking in the crc code when it is not needed. For shared libraries, gzip encoding should be left enabled. */ #ifndef NO_GZIP # define GZIP #endif /* =========================================================================== * Internal compression state. */ #define LENGTH_CODES 29 /* number of length codes, not counting the special END_BLOCK code */ #define LITERALS 256 /* number of literal bytes 0..255 */ #define L_CODES (LITERALS+1+LENGTH_CODES) /* number of Literal or Length codes, including the END_BLOCK code */ #define D_CODES 30 /* number of distance codes */ #define BL_CODES 19 /* number of codes used to transfer the bit lengths */ #define HEAP_SIZE (2*L_CODES+1) /* maximum heap size */ #define MAX_BITS 15 /* All codes must not exceed MAX_BITS bits */ #define Buf_size 16 /* size of bit buffer in bi_buf */ #define INIT_STATE 42 /* zlib header -> BUSY_STATE */ #ifdef GZIP # define GZIP_STATE 57 /* gzip header -> BUSY_STATE | EXTRA_STATE */ #endif #define EXTRA_STATE 69 /* gzip extra block -> NAME_STATE */ #define NAME_STATE 73 /* gzip file name -> COMMENT_STATE */ #define COMMENT_STATE 91 /* gzip comment -> HCRC_STATE */ #define HCRC_STATE 103 /* gzip header CRC -> BUSY_STATE */ #define BUSY_STATE 113 /* deflate -> FINISH_STATE */ #define FINISH_STATE 666 /* stream complete */ /* Stream status */ /* Data structure describing a single value and its code string. */ typedef struct ct_data_s { union { ush freq; /* frequency count */ ush code; /* bit string */ } fc; union { ush dad; /* father node in Huffman tree */ ush len; /* length of bit string */ } dl; } FAR ct_data; #define Freq fc.freq #define Code fc.code #define Dad dl.dad #define Len dl.len typedef struct static_tree_desc_s static_tree_desc; typedef struct tree_desc_s { ct_data *dyn_tree; /* the dynamic tree */ int max_code; /* largest code with non zero frequency */ const static_tree_desc *stat_desc; /* the corresponding static tree */ } FAR tree_desc; typedef ush Pos; typedef Pos FAR Posf; typedef unsigned IPos; /* A Pos is an index in the character window. We use short instead of int to * save space in the various tables. IPos is used only for parameter passing. */ typedef struct internal_state { z_streamp strm; /* pointer back to this zlib stream */ int status; /* as the name implies */ Bytef *pending_buf; /* output still pending */ ulg pending_buf_size; /* size of pending_buf */ Bytef *pending_out; /* next pending byte to output to the stream */ ulg pending; /* nb of bytes in the pending buffer */ int wrap; /* bit 0 true for zlib, bit 1 true for gzip */ gz_headerp gzhead; /* gzip header information to write */ ulg gzindex; /* where in extra, name, or comment */ Byte method; /* can only be DEFLATED */ int last_flush; /* value of flush param for previous deflate call */ /* used by deflate.c: */ uInt w_size; /* LZ77 window size (32K by default) */ uInt w_bits; /* log2(w_size) (8..16) */ uInt w_mask; /* w_size - 1 */ Bytef *window; /* Sliding window. 
Input bytes are read into the second half of the window, * and move to the first half later to keep a dictionary of at least wSize * bytes. With this organization, matches are limited to a distance of * wSize-MAX_MATCH bytes, but this ensures that IO is always * performed with a length multiple of the block size. Also, it limits * the window size to 64K, which is quite useful on MSDOS. * To do: use the user input buffer as sliding window. */ ulg window_size; /* Actual size of window: 2*wSize, except when the user input buffer * is directly used as sliding window. */ Posf *prev; /* Link to older string with same hash index. To limit the size of this * array to 64K, this link is maintained only for the last 32K strings. * An index in this array is thus a window index modulo 32K. */ Posf *head; /* Heads of the hash chains or NIL. */ uInt ins_h; /* hash index of string to be inserted */ uInt hash_size; /* number of elements in hash table */ uInt hash_bits; /* log2(hash_size) */ uInt hash_mask; /* hash_size-1 */ uInt hash_shift; /* Number of bits by which ins_h must be shifted at each input * step. It must be such that after MIN_MATCH steps, the oldest * byte no longer takes part in the hash key, that is: * hash_shift * MIN_MATCH >= hash_bits */ long block_start; /* Window position at the beginning of the current output block. Gets * negative when the window is moved backwards. */ uInt match_length; /* length of best match */ IPos prev_match; /* previous match */ int match_available; /* set if previous match exists */ uInt strstart; /* start of string to insert */ uInt match_start; /* start of matching string */ uInt lookahead; /* number of valid bytes ahead in window */ uInt prev_length; /* Length of the best match at previous step. Matches not greater than this * are discarded. This is used in the lazy match evaluation. */ uInt max_chain_length; /* To speed up deflation, hash chains are never searched beyond this * length. A higher limit improves compression ratio but degrades the * speed. */ uInt max_lazy_match; /* Attempt to find a better match only when the current match is strictly * smaller than this value. This mechanism is used only for compression * levels >= 4. */ # define max_insert_length max_lazy_match /* Insert new strings in the hash table only if the match length is not * greater than this length. This saves time but degrades compression. * max_insert_length is used only for compression levels <= 3. */ int level; /* compression level (1..9) */ int strategy; /* favor or force Huffman coding*/ uInt good_match; /* Use a faster search when the previous match is longer than this */ int nice_match; /* Stop searching when current match exceeds this */ /* used by trees.c: */ /* Didn't use ct_data typedef below to suppress compiler warning */ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ struct tree_desc_s l_desc; /* desc. for literal tree */ struct tree_desc_s d_desc; /* desc. for distance tree */ struct tree_desc_s bl_desc; /* desc. for bit length tree */ ush bl_count[MAX_BITS+1]; /* number of codes at each bit length for an optimal tree */ int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ int heap_len; /* number of elements in the heap */ int heap_max; /* element of largest frequency */ /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. * The same heap array is used to build all trees. 
*/ uch depth[2*L_CODES+1]; /* Depth of each subtree used as tie breaker for trees of equal frequency */ uchf *l_buf; /* buffer for literals or lengths */ uInt lit_bufsize; /* Size of match buffer for literals/lengths. There are 4 reasons for * limiting lit_bufsize to 64K: * - frequencies can be kept in 16 bit counters * - if compression is not successful for the first block, all input * data is still in the window so we can still emit a stored block even * when input comes from standard input. (This can also be done for * all blocks if lit_bufsize is not greater than 32K.) * - if compression is not successful for a file smaller than 64K, we can * even emit a stored file instead of a stored block (saving 5 bytes). * This is applicable only for zip (not gzip or zlib). * - creating new Huffman trees less frequently may not provide fast * adaptation to changes in the input data statistics. (Take for * example a binary file with poorly compressible code followed by * a highly compressible string table.) Smaller buffer sizes give * fast adaptation but have of course the overhead of transmitting * trees more frequently. * - I can't count above 4 */ uInt last_lit; /* running index in l_buf */ ushf *d_buf; /* Buffer for distances. To simplify the code, d_buf and l_buf have * the same number of elements. To use different lengths, an extra flag * array would be necessary. */ ulg opt_len; /* bit length of current block with optimal trees */ ulg static_len; /* bit length of current block with static trees */ uInt matches; /* number of string matches in current block */ uInt insert; /* bytes at end of window left to insert */ #ifdef ZLIB_DEBUG ulg compressed_len; /* total bit length of compressed file mod 2^32 */ ulg bits_sent; /* bit length of compressed data sent mod 2^32 */ #endif ush bi_buf; /* Output buffer. bits are inserted starting at the bottom (least * significant bits). */ int bi_valid; /* Number of valid bits in bi_buf. All bits above the last valid bit * are always zero. */ ulg high_water; /* High water mark offset in window for initialized bytes -- bytes above * this are set to zero in order to avoid memory check warnings when * longest match routines access bytes past the input. This is then * updated to the new high water mark. */ } FAR deflate_state; /* Output a byte on the stream. * IN assertion: there is enough room in pending_buf. */ #define put_byte(s, c) {s->pending_buf[s->pending++] = (Bytef)(c);} #define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) /* Minimum amount of lookahead, except at the end of the input file. * See deflate.c for comments about the MIN_MATCH+1. */ #define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD) /* In order to simplify the code, particularly on 16 bit machines, match * distances are limited to MAX_DIST instead of WSIZE. */ #define WIN_INIT MAX_MATCH /* Number of bytes after end of data in window to initialize in order to avoid memory checker errors from longest match routines */ /* in trees.c */ void ZLIB_INTERNAL _tr_init OF((deflate_state *s)); int ZLIB_INTERNAL _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); void ZLIB_INTERNAL _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len, int last)); void ZLIB_INTERNAL _tr_flush_bits OF((deflate_state *s)); void ZLIB_INTERNAL _tr_align OF((deflate_state *s)); void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len, int last)); #define d_code(dist) \ ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)]) /* Mapping from a distance to a distance code. 
dist is the distance - 1 and * must not have side effects. _dist_code[256] and _dist_code[257] are never * used. */ #ifndef ZLIB_DEBUG /* Inline versions of _tr_tally for speed: */ #if defined(GEN_TREES_H) || !defined(STDC) extern uch ZLIB_INTERNAL _length_code[]; extern uch ZLIB_INTERNAL _dist_code[]; #else extern const uch ZLIB_INTERNAL _length_code[]; extern const uch ZLIB_INTERNAL _dist_code[]; #endif # define _tr_tally_lit(s, c, flush) \ { uch cc = (c); \ s->d_buf[s->last_lit] = 0; \ s->l_buf[s->last_lit++] = cc; \ s->dyn_ltree[cc].Freq++; \ flush = (s->last_lit == s->lit_bufsize-1); \ } # define _tr_tally_dist(s, distance, length, flush) \ { uch len = (uch)(length); \ ush dist = (ush)(distance); \ s->d_buf[s->last_lit] = dist; \ s->l_buf[s->last_lit++] = len; \ dist--; \ s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ s->dyn_dtree[d_code(dist)].Freq++; \ flush = (s->last_lit == s->lit_bufsize-1); \ } #else # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) # define _tr_tally_dist(s, distance, length, flush) \ flush = _tr_tally(s, distance, length) #endif #endif /* DEFLATE_H */
/* deflate.h -- internal compression state * Copyright (C) 1995-2016 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ /* @(#) $Id$ */ #ifndef DEFLATE_H #define DEFLATE_H #include "zutil.h" /* define NO_GZIP when compiling if you want to disable gzip header and trailer creation by deflate(). NO_GZIP would be used to avoid linking in the crc code when it is not needed. For shared libraries, gzip encoding should be left enabled. */ #ifndef NO_GZIP # define GZIP #endif /* =========================================================================== * Internal compression state. */ #define LENGTH_CODES 29 /* number of length codes, not counting the special END_BLOCK code */ #define LITERALS 256 /* number of literal bytes 0..255 */ #define L_CODES (LITERALS+1+LENGTH_CODES) /* number of Literal or Length codes, including the END_BLOCK code */ #define D_CODES 30 /* number of distance codes */ #define BL_CODES 19 /* number of codes used to transfer the bit lengths */ #define HEAP_SIZE (2*L_CODES+1) /* maximum heap size */ #define MAX_BITS 15 /* All codes must not exceed MAX_BITS bits */ #define Buf_size 16 /* size of bit buffer in bi_buf */ #define INIT_STATE 42 /* zlib header -> BUSY_STATE */ #ifdef GZIP # define GZIP_STATE 57 /* gzip header -> BUSY_STATE | EXTRA_STATE */ #endif #define EXTRA_STATE 69 /* gzip extra block -> NAME_STATE */ #define NAME_STATE 73 /* gzip file name -> COMMENT_STATE */ #define COMMENT_STATE 91 /* gzip comment -> HCRC_STATE */ #define HCRC_STATE 103 /* gzip header CRC -> BUSY_STATE */ #define BUSY_STATE 113 /* deflate -> FINISH_STATE */ #define FINISH_STATE 666 /* stream complete */ /* Stream status */ /* Data structure describing a single value and its code string. */ typedef struct ct_data_s { union { ush freq; /* frequency count */ ush code; /* bit string */ } fc; union { ush dad; /* father node in Huffman tree */ ush len; /* length of bit string */ } dl; } FAR ct_data; #define Freq fc.freq #define Code fc.code #define Dad dl.dad #define Len dl.len typedef struct static_tree_desc_s static_tree_desc; typedef struct tree_desc_s { ct_data *dyn_tree; /* the dynamic tree */ int max_code; /* largest code with non zero frequency */ const static_tree_desc *stat_desc; /* the corresponding static tree */ } FAR tree_desc; typedef ush Pos; typedef Pos FAR Posf; typedef unsigned IPos; /* A Pos is an index in the character window. We use short instead of int to * save space in the various tables. IPos is used only for parameter passing. */ typedef struct internal_state { z_streamp strm; /* pointer back to this zlib stream */ int status; /* as the name implies */ Bytef *pending_buf; /* output still pending */ ulg pending_buf_size; /* size of pending_buf */ Bytef *pending_out; /* next pending byte to output to the stream */ ulg pending; /* nb of bytes in the pending buffer */ int wrap; /* bit 0 true for zlib, bit 1 true for gzip */ gz_headerp gzhead; /* gzip header information to write */ ulg gzindex; /* where in extra, name, or comment */ Byte method; /* can only be DEFLATED */ int last_flush; /* value of flush param for previous deflate call */ /* used by deflate.c: */ uInt w_size; /* LZ77 window size (32K by default) */ uInt w_bits; /* log2(w_size) (8..16) */ uInt w_mask; /* w_size - 1 */ Bytef *window; /* Sliding window. 
Input bytes are read into the second half of the window, * and move to the first half later to keep a dictionary of at least wSize * bytes. With this organization, matches are limited to a distance of * wSize-MAX_MATCH bytes, but this ensures that IO is always * performed with a length multiple of the block size. Also, it limits * the window size to 64K, which is quite useful on MSDOS. * To do: use the user input buffer as sliding window. */ ulg window_size; /* Actual size of window: 2*wSize, except when the user input buffer * is directly used as sliding window. */ Posf *prev; /* Link to older string with same hash index. To limit the size of this * array to 64K, this link is maintained only for the last 32K strings. * An index in this array is thus a window index modulo 32K. */ Posf *head; /* Heads of the hash chains or NIL. */ uInt ins_h; /* hash index of string to be inserted */ uInt hash_size; /* number of elements in hash table */ uInt hash_bits; /* log2(hash_size) */ uInt hash_mask; /* hash_size-1 */ uInt hash_shift; /* Number of bits by which ins_h must be shifted at each input * step. It must be such that after MIN_MATCH steps, the oldest * byte no longer takes part in the hash key, that is: * hash_shift * MIN_MATCH >= hash_bits */ long block_start; /* Window position at the beginning of the current output block. Gets * negative when the window is moved backwards. */ uInt match_length; /* length of best match */ IPos prev_match; /* previous match */ int match_available; /* set if previous match exists */ uInt strstart; /* start of string to insert */ uInt match_start; /* start of matching string */ uInt lookahead; /* number of valid bytes ahead in window */ uInt prev_length; /* Length of the best match at previous step. Matches not greater than this * are discarded. This is used in the lazy match evaluation. */ uInt max_chain_length; /* To speed up deflation, hash chains are never searched beyond this * length. A higher limit improves compression ratio but degrades the * speed. */ uInt max_lazy_match; /* Attempt to find a better match only when the current match is strictly * smaller than this value. This mechanism is used only for compression * levels >= 4. */ # define max_insert_length max_lazy_match /* Insert new strings in the hash table only if the match length is not * greater than this length. This saves time but degrades compression. * max_insert_length is used only for compression levels <= 3. */ int level; /* compression level (1..9) */ int strategy; /* favor or force Huffman coding*/ uInt good_match; /* Use a faster search when the previous match is longer than this */ int nice_match; /* Stop searching when current match exceeds this */ /* used by trees.c: */ /* Didn't use ct_data typedef below to suppress compiler warning */ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ struct tree_desc_s l_desc; /* desc. for literal tree */ struct tree_desc_s d_desc; /* desc. for distance tree */ struct tree_desc_s bl_desc; /* desc. for bit length tree */ ush bl_count[MAX_BITS+1]; /* number of codes at each bit length for an optimal tree */ int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ int heap_len; /* number of elements in the heap */ int heap_max; /* element of largest frequency */ /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. * The same heap array is used to build all trees. 
*/ uch depth[2*L_CODES+1]; /* Depth of each subtree used as tie breaker for trees of equal frequency */ uchf *l_buf; /* buffer for literals or lengths */ uInt lit_bufsize; /* Size of match buffer for literals/lengths. There are 4 reasons for * limiting lit_bufsize to 64K: * - frequencies can be kept in 16 bit counters * - if compression is not successful for the first block, all input * data is still in the window so we can still emit a stored block even * when input comes from standard input. (This can also be done for * all blocks if lit_bufsize is not greater than 32K.) * - if compression is not successful for a file smaller than 64K, we can * even emit a stored file instead of a stored block (saving 5 bytes). * This is applicable only for zip (not gzip or zlib). * - creating new Huffman trees less frequently may not provide fast * adaptation to changes in the input data statistics. (Take for * example a binary file with poorly compressible code followed by * a highly compressible string table.) Smaller buffer sizes give * fast adaptation but have of course the overhead of transmitting * trees more frequently. * - I can't count above 4 */ uInt last_lit; /* running index in l_buf */ ushf *d_buf; /* Buffer for distances. To simplify the code, d_buf and l_buf have * the same number of elements. To use different lengths, an extra flag * array would be necessary. */ ulg opt_len; /* bit length of current block with optimal trees */ ulg static_len; /* bit length of current block with static trees */ uInt matches; /* number of string matches in current block */ uInt insert; /* bytes at end of window left to insert */ #ifdef ZLIB_DEBUG ulg compressed_len; /* total bit length of compressed file mod 2^32 */ ulg bits_sent; /* bit length of compressed data sent mod 2^32 */ #endif ush bi_buf; /* Output buffer. bits are inserted starting at the bottom (least * significant bits). */ int bi_valid; /* Number of valid bits in bi_buf. All bits above the last valid bit * are always zero. */ ulg high_water; /* High water mark offset in window for initialized bytes -- bytes above * this are set to zero in order to avoid memory check warnings when * longest match routines access bytes past the input. This is then * updated to the new high water mark. */ } FAR deflate_state; /* Output a byte on the stream. * IN assertion: there is enough room in pending_buf. */ #define put_byte(s, c) {s->pending_buf[s->pending++] = (Bytef)(c);} #define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) /* Minimum amount of lookahead, except at the end of the input file. * See deflate.c for comments about the MIN_MATCH+1. */ #define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD) /* In order to simplify the code, particularly on 16 bit machines, match * distances are limited to MAX_DIST instead of WSIZE. */ #define WIN_INIT MAX_MATCH /* Number of bytes after end of data in window to initialize in order to avoid memory checker errors from longest match routines */ /* in trees.c */ void ZLIB_INTERNAL _tr_init OF((deflate_state *s)); int ZLIB_INTERNAL _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); void ZLIB_INTERNAL _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len, int last)); void ZLIB_INTERNAL _tr_flush_bits OF((deflate_state *s)); void ZLIB_INTERNAL _tr_align OF((deflate_state *s)); void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len, int last)); #define d_code(dist) \ ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)]) /* Mapping from a distance to a distance code. 
dist is the distance - 1 and * must not have side effects. _dist_code[256] and _dist_code[257] are never * used. */ #ifndef ZLIB_DEBUG /* Inline versions of _tr_tally for speed: */ #if defined(GEN_TREES_H) || !defined(STDC) extern uch ZLIB_INTERNAL _length_code[]; extern uch ZLIB_INTERNAL _dist_code[]; #else extern const uch ZLIB_INTERNAL _length_code[]; extern const uch ZLIB_INTERNAL _dist_code[]; #endif # define _tr_tally_lit(s, c, flush) \ { uch cc = (c); \ s->d_buf[s->last_lit] = 0; \ s->l_buf[s->last_lit++] = cc; \ s->dyn_ltree[cc].Freq++; \ flush = (s->last_lit == s->lit_bufsize-1); \ } # define _tr_tally_dist(s, distance, length, flush) \ { uch len = (uch)(length); \ ush dist = (ush)(distance); \ s->d_buf[s->last_lit] = dist; \ s->l_buf[s->last_lit++] = len; \ dist--; \ s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ s->dyn_dtree[d_code(dist)].Freq++; \ flush = (s->last_lit == s->lit_bufsize-1); \ } #else # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) # define _tr_tally_dist(s, distance, length, flush) \ flush = _tr_tally(s, distance, length) #endif #endif /* DEFLATE_H */
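The ct_data layout above overlays the frequency count and the final bit code in one 16-bit field, since the two are never needed at the same time. A minimal standalone sketch of that overlay follows; the names here are illustrative, not zlib's.

#include <cstdint>
#include <cstdio>

union freq_or_code
{
    uint16_t freq; // used while symbol occurrences are being counted
    uint16_t code; // reused for the emitted bit string once the tree is built
};

int main()
{
    freq_or_code fc;
    fc.freq = 42;   // counting phase
    fc.code = 0x1b; // after tree construction, the same storage holds the code
    std::printf("code=0x%x\n", fc.code);
    return 0;
}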
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
./src/coreclr/pal/tests/palsuite/miscellaneous/FormatMessageW/test1/test.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source : test.c ** ** Purpose: Test for FormatMessageW() function ** ** **=========================================================*/ #define UNICODE #include <palsuite.h> PALTEST(miscellaneous_FormatMessageW_test1_paltest_formatmessagew_test1, "miscellaneous/FormatMessageW/test1/paltest_formatmessagew_test1") { WCHAR TheString[] = {'P','a','l',' ','T','e','s','t','\0'}; WCHAR OutBuffer[128]; int ReturnResult; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } ReturnResult = FormatMessage( FORMAT_MESSAGE_FROM_STRING, /* source and processing options */ TheString, /* message source */ 0, /* message identifier */ 0, /* language identifier */ OutBuffer, /* message buffer */ 1024, /* maximum size of message buffer */ NULL /* array of message inserts */ ); if(ReturnResult == 0) { Fail("ERROR: The return value was 0, which indicates failure. " "The function failed when trying to Format a simple string" ", with no formatters in it."); } if(memcmp(OutBuffer,TheString,wcslen(OutBuffer)*2+2) != 0) { Fail("ERROR: The formatted string should be %s but is really %s.", convertC(TheString), convertC(OutBuffer)); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source : test.c ** ** Purpose: Test for FormatMessageW() function ** ** **=========================================================*/ #define UNICODE #include <palsuite.h> PALTEST(miscellaneous_FormatMessageW_test1_paltest_formatmessagew_test1, "miscellaneous/FormatMessageW/test1/paltest_formatmessagew_test1") { WCHAR TheString[] = {'P','a','l',' ','T','e','s','t','\0'}; WCHAR OutBuffer[128]; int ReturnResult; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } ReturnResult = FormatMessage( FORMAT_MESSAGE_FROM_STRING, /* source and processing options */ TheString, /* message source */ 0, /* message identifier */ 0, /* language identifier */ OutBuffer, /* message buffer */ 1024, /* maximum size of message buffer */ NULL /* array of message inserts */ ); if(ReturnResult == 0) { Fail("ERROR: The return value was 0, which indicates failure. " "The function failed when trying to Format a simple string" ", with no formatters in it."); } if(memcmp(OutBuffer,TheString,wcslen(OutBuffer)*2+2) != 0) { Fail("ERROR: The formatted string should be %s but is really %s.", convertC(TheString), convertC(OutBuffer)); } PAL_Terminate(); return PASS; }
-1
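The PAL test above intentionally formats a string with no insert sequences. For contrast, a hedged sketch of standard Win32 FormatMessageW usage with inserts (FORMAT_MESSAGE_ARGUMENT_ARRAY; not something this test exercises, and the message text here is illustrative only):

#include <windows.h>
#include <stdio.h>

int main(void)
{
    WCHAR out[128];
    /* With FORMAT_MESSAGE_ARGUMENT_ARRAY the last parameter is an array of
     * DWORD_PTR values; %1 is a string insert, %2!d! formats an integer. */
    DWORD_PTR args[] = { (DWORD_PTR)L"Pal", (DWORD_PTR)42 };

    DWORD len = FormatMessageW(
        FORMAT_MESSAGE_FROM_STRING | FORMAT_MESSAGE_ARGUMENT_ARRAY,
        L"%1 test, value %2!d!\n",   /* message source */
        0, 0,                        /* message id, language id */
        out, ARRAYSIZE(out),         /* buffer and its size in WCHARs */
        (va_list *)args);            /* insert array */

    if (len != 0)
        wprintf(L"%ls", out);
    return 0;
}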
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/pal/tests/palsuite/file_io/GetStdHandle/test1/GetStdHandle.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: GetStdHandle.c (test 1) ** ** Purpose: Tests the PAL implementation of the GetStdHandle function. ** ** **===================================================================*/ #include <palsuite.h> PALTEST(file_io_GetStdHandle_test1_paltest_getstdhandle_test1, "file_io/GetStdHandle/test1/paltest_getstdhandle_test1") { HANDLE hFile = NULL; DWORD dwBytesWritten = 0; DWORD dwFileType; BOOL bRc = FALSE; const char* szText = "this is a test of GetStdHandle\n"; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } /* * attempt to get an invalid handle */ hFile = GetStdHandle(-2); if (hFile != INVALID_HANDLE_VALUE) { Fail("GetStdHandle: ERROR -> A request for the STD_INPUT_HANDLE " "returned an invalid handle.\n"); } /* * test the STD_INPUT_HANDLE handle */ hFile = GetStdHandle(STD_INPUT_HANDLE); if (hFile == INVALID_HANDLE_VALUE) { Fail("GetStdHandle: ERROR -> A request for the STD_INPUT_HANDLE " "returned an invalid handle.\n"); } /* an attempt to write to the input handle should fail */ /* I don't know how to automate a read from the input handle */ bRc = WriteFile(hFile, szText, (DWORD)strlen(szText), &dwBytesWritten, NULL); if (bRc != FALSE) { Fail("GetStdHandle: ERROR -> WriteFile was able to write to " "STD_INPUT_HANDLE when it should have failed.\n"); } /* * test the STD_OUTPUT_HANDLE handle */ hFile = GetStdHandle(STD_OUTPUT_HANDLE); if (hFile == INVALID_HANDLE_VALUE) { Fail("GetStdHandle: ERROR -> A request for the STD_OUTPUT_HANDLE " "returned an invalid handle.\n"); } /* try to write to the output handle */ bRc = WriteFile(hFile, szText, (DWORD)strlen(szText), &dwBytesWritten, NULL); if (bRc != TRUE) { Fail("GetStdHandle: ERROR -> WriteFile failed to write to " "STD_OUTPUT_HANDLE with the error %ld\n", GetLastError()); } /* test the STD_ERROR_HANDLE handle */ hFile = GetStdHandle(STD_ERROR_HANDLE); if (hFile == INVALID_HANDLE_VALUE) { Fail("GetStdHandle: ERROR -> A request for the STD_ERROR_HANDLE " "returned an invalid handle.\n"); } /* try to write to the error handle */ bRc = WriteFile(hFile, szText, (DWORD)strlen(szText), &dwBytesWritten, NULL); if (bRc != TRUE) { Fail("GetStdHandle: ERROR -> WriteFile failed to write to " "STD_ERROR_HANDLE with the error %ld\n", GetLastError()); } /* check to see if we can CloseHandle works on the STD_ERROR_HANDLE */ if (!CloseHandle(hFile)) { Fail("GetStdHandle: ERROR -> CloseHandle failed. GetLastError " "returned %u.\n", GetLastError()); } /* try to write to the closed error handle */ bRc = WriteFile(hFile, szText, (DWORD)strlen(szText), &dwBytesWritten, NULL); if (bRc) { Fail("GetStdHandle: ERROR -> WriteFile was able to write to the closed" " STD_ERROR_HANDLE handle.\n"); } PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*===================================================================== ** ** Source: GetStdHandle.c (test 1) ** ** Purpose: Tests the PAL implementation of the GetStdHandle function. ** ** **===================================================================*/ #include <palsuite.h> PALTEST(file_io_GetStdHandle_test1_paltest_getstdhandle_test1, "file_io/GetStdHandle/test1/paltest_getstdhandle_test1") { HANDLE hFile = NULL; DWORD dwBytesWritten = 0; DWORD dwFileType; BOOL bRc = FALSE; const char* szText = "this is a test of GetStdHandle\n"; if (0 != PAL_Initialize(argc,argv)) { return FAIL; } /* * attempt to get an invalid handle */ hFile = GetStdHandle(-2); if (hFile != INVALID_HANDLE_VALUE) { Fail("GetStdHandle: ERROR -> A request for the STD_INPUT_HANDLE " "returned an invalid handle.\n"); } /* * test the STD_INPUT_HANDLE handle */ hFile = GetStdHandle(STD_INPUT_HANDLE); if (hFile == INVALID_HANDLE_VALUE) { Fail("GetStdHandle: ERROR -> A request for the STD_INPUT_HANDLE " "returned an invalid handle.\n"); } /* an attempt to write to the input handle should fail */ /* I don't know how to automate a read from the input handle */ bRc = WriteFile(hFile, szText, (DWORD)strlen(szText), &dwBytesWritten, NULL); if (bRc != FALSE) { Fail("GetStdHandle: ERROR -> WriteFile was able to write to " "STD_INPUT_HANDLE when it should have failed.\n"); } /* * test the STD_OUTPUT_HANDLE handle */ hFile = GetStdHandle(STD_OUTPUT_HANDLE); if (hFile == INVALID_HANDLE_VALUE) { Fail("GetStdHandle: ERROR -> A request for the STD_OUTPUT_HANDLE " "returned an invalid handle.\n"); } /* try to write to the output handle */ bRc = WriteFile(hFile, szText, (DWORD)strlen(szText), &dwBytesWritten, NULL); if (bRc != TRUE) { Fail("GetStdHandle: ERROR -> WriteFile failed to write to " "STD_OUTPUT_HANDLE with the error %ld\n", GetLastError()); } /* test the STD_ERROR_HANDLE handle */ hFile = GetStdHandle(STD_ERROR_HANDLE); if (hFile == INVALID_HANDLE_VALUE) { Fail("GetStdHandle: ERROR -> A request for the STD_ERROR_HANDLE " "returned an invalid handle.\n"); } /* try to write to the error handle */ bRc = WriteFile(hFile, szText, (DWORD)strlen(szText), &dwBytesWritten, NULL); if (bRc != TRUE) { Fail("GetStdHandle: ERROR -> WriteFile failed to write to " "STD_ERROR_HANDLE with the error %ld\n", GetLastError()); } /* check to see if we can CloseHandle works on the STD_ERROR_HANDLE */ if (!CloseHandle(hFile)) { Fail("GetStdHandle: ERROR -> CloseHandle failed. GetLastError " "returned %u.\n", GetLastError()); } /* try to write to the closed error handle */ bRc = WriteFile(hFile, szText, (DWORD)strlen(szText), &dwBytesWritten, NULL); if (bRc) { Fail("GetStdHandle: ERROR -> WriteFile was able to write to the closed" " STD_ERROR_HANDLE handle.\n"); } PAL_Terminate(); return PASS; }
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/pal/src/libunwind/include/win/endian.h
// This is an incomplete & imprecise implementation of the // standard file by the same name // Since this is only intended for VC++ compilers // use #pragma once instead of guard macros #pragma once #ifdef _MSC_VER // Only for cross compilation to windows #define __LITTLE_ENDIAN 1234 #define __BIG_ENDIAN 4321 #define __BYTE_ORDER __LITTLE_ENDIAN #endif // _MSC_VER
// This is an incomplete & imprecise implementation of the // standard file by the same name // Since this is only intended for VC++ compilers // use #pragma once instead of guard macros #pragma once #ifdef _MSC_VER // Only for cross compilation to windows #define __LITTLE_ENDIAN 1234 #define __BIG_ENDIAN 4321 #define __BYTE_ORDER __LITTLE_ENDIAN #endif // _MSC_VER
-1
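A minimal consumer of the endian.h stub above, assuming the win/ include directory is on the include path so that <endian.h> resolves to this file when cross-compiling with MSVC (a sketch, not part of libunwind):

#include <stdio.h>
#include <endian.h>   /* resolves to the stub above on MSVC cross-builds */

int main(void)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
    printf("configured as a little-endian target\n");
#else
    printf("configured as a big-endian target\n");
#endif
    return 0;
}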
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/native/external/zlib/inffast.h
/* inffast.h -- header to use inffast.c * Copyright (C) 1995-2003, 2010 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ void ZLIB_INTERNAL inflate_fast OF((z_streamp strm, unsigned start));
/* inffast.h -- header to use inffast.c * Copyright (C) 1995-2003, 2010 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ void ZLIB_INTERNAL inflate_fast OF((z_streamp strm, unsigned start));
-1
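The OF() wrapper in the inflate_fast declaration above is zlib's prototype-compatibility macro (defined in zconf.h); a sketch of the convention:

/* Sketch of zlib's OF() macro from zconf.h: ANSI compilers keep the
 * argument list, pre-ANSI compilers drop it to a K&R-style declaration. */
#ifdef STDC
#  define OF(args)  args
#else
#  define OF(args)  ()
#endif

/* With STDC defined, the declaration above expands to: */
/* void ZLIB_INTERNAL inflate_fast(z_streamp strm, unsigned start); */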
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/jit/regset.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX RegSet XX XX XX XX Represents the register set, and their states during code generation XX XX Can select an unused register, keeps track of the contents of the XX XX registers, and can spill registers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "emit.h" /*****************************************************************************/ #ifdef TARGET_ARM64 const regMaskSmall regMasks[] = { #define REGDEF(name, rnum, mask, xname, wname) mask, #include "register.h" }; #else // !TARGET_ARM64 const regMaskSmall regMasks[] = { #define REGDEF(name, rnum, mask, sname) mask, #include "register.h" }; #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX RegSet XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ //------------------------------------------------------------------------ // verifyRegUsed: verify that the register is marked as used. // // Arguments: // reg - The register to verify. // // Return Value: // None. // // Assumptions: // The caller must have ensured that the register is already marked // as used. // // Notes: // This method is intended to be called during code generation, and // should simply validate that the register (or registers) have // already been added to the modified set. void RegSet::verifyRegUsed(regNumber reg) { // TODO-Cleanup: we need to identify the places where the register // is not marked as used when this is called. rsSetRegsModified(genRegMask(reg)); } //------------------------------------------------------------------------ // verifyRegistersUsed: verify that the registers are marked as used. // // Arguments: // regs - The registers to verify. // // Return Value: // None. // // Assumptions: // The caller must have ensured that the registers are already marked // as used. // // Notes: // This method is intended to be called during code generation, and // should simply validate that the register (or registers) have // already been added to the modified set. void RegSet::verifyRegistersUsed(regMaskTP regMask) { if (m_rsCompiler->opts.OptimizationDisabled()) { return; } if (regMask == RBM_NONE) { return; } // TODO-Cleanup: we need to identify the places where the registers // are not marked as used when this is called. rsSetRegsModified(regMask); } void RegSet::rsClearRegsModified() { assert(m_rsCompiler->lvaDoneFrameLayout < Compiler::FINAL_FRAME_LAYOUT); #ifdef DEBUG if (m_rsCompiler->verbose) { printf("Clearing modified regs.\n"); } rsModifiedRegsMaskInitialized = true; #endif // DEBUG rsModifiedRegsMask = RBM_NONE; } void RegSet::rsSetRegsModified(regMaskTP mask DEBUGARG(bool suppressDump)) { assert(mask != RBM_NONE); assert(rsModifiedRegsMaskInitialized); // We can't update the modified registers set after final frame layout (that is, during code // generation and after). 
Ignore prolog and epilog generation: they call register tracking to // modify rbp, for example, even in functions that use rbp as a frame pointer. Make sure normal // code generation isn't actually adding to set of modified registers. // Frame layout is only affected by callee-saved registers, so only ensure that callee-saved // registers aren't modified after final frame layout. assert((m_rsCompiler->lvaDoneFrameLayout < Compiler::FINAL_FRAME_LAYOUT) || m_rsCompiler->compGeneratingProlog || m_rsCompiler->compGeneratingEpilog || (((rsModifiedRegsMask | mask) & RBM_CALLEE_SAVED) == (rsModifiedRegsMask & RBM_CALLEE_SAVED))); #ifdef DEBUG if (m_rsCompiler->verbose && !suppressDump) { if (rsModifiedRegsMask != (rsModifiedRegsMask | mask)) { printf("Marking regs modified: "); dspRegMask(mask); printf(" ("); dspRegMask(rsModifiedRegsMask); printf(" => "); dspRegMask(rsModifiedRegsMask | mask); printf(")\n"); } } #endif // DEBUG rsModifiedRegsMask |= mask; } void RegSet::rsRemoveRegsModified(regMaskTP mask) { assert(mask != RBM_NONE); assert(rsModifiedRegsMaskInitialized); // See comment in rsSetRegsModified(). assert((m_rsCompiler->lvaDoneFrameLayout < Compiler::FINAL_FRAME_LAYOUT) || m_rsCompiler->compGeneratingProlog || m_rsCompiler->compGeneratingEpilog || (((rsModifiedRegsMask & ~mask) & RBM_CALLEE_SAVED) == (rsModifiedRegsMask & RBM_CALLEE_SAVED))); #ifdef DEBUG if (m_rsCompiler->verbose) { printf("Removing modified regs: "); dspRegMask(mask); if (rsModifiedRegsMask == (rsModifiedRegsMask & ~mask)) { printf(" (unchanged)"); } else { printf(" ("); dspRegMask(rsModifiedRegsMask); printf(" => "); dspRegMask(rsModifiedRegsMask & ~mask); printf(")"); } printf("\n"); } #endif // DEBUG rsModifiedRegsMask &= ~mask; } void RegSet::SetMaskVars(regMaskTP newMaskVars) { #ifdef DEBUG if (m_rsCompiler->verbose) { printf("\t\t\t\t\t\t\tLive regs: "); if (_rsMaskVars == newMaskVars) { printf("(unchanged) "); } else { printRegMaskInt(_rsMaskVars); m_rsCompiler->GetEmitter()->emitDispRegSet(_rsMaskVars); printf(" => "); } printRegMaskInt(newMaskVars); m_rsCompiler->GetEmitter()->emitDispRegSet(newMaskVars); printf("\n"); } #endif // DEBUG _rsMaskVars = newMaskVars; } /*****************************************************************************/ RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo) : m_rsCompiler(compiler), m_rsGCInfo(gcInfo) { /* Initialize the spill logic */ rsSpillInit(); /* Initialize the argument register count */ // TODO-Cleanup: Consider moving intRegState and floatRegState to RegSet. They used // to be initialized here, but are now initialized in the CodeGen constructor. // intRegState.rsCurRegArgNum = 0; // loatRegState.rsCurRegArgNum = 0; rsMaskResvd = RBM_NONE; #ifdef TARGET_ARMARCH rsMaskCalleeSaved = RBM_NONE; #endif // TARGET_ARMARCH #ifdef TARGET_ARM rsMaskPreSpillRegArg = RBM_NONE; rsMaskPreSpillAlign = RBM_NONE; #endif #ifdef DEBUG rsModifiedRegsMaskInitialized = false; #endif // DEBUG } /***************************************************************************** * * Finds the SpillDsc corresponding to 'tree' assuming it was spilled from 'reg'. */ RegSet::SpillDsc* RegSet::rsGetSpillInfo(GenTree* tree, regNumber reg, SpillDsc** pPrevDsc) { /* Normally, trees are unspilled in the order of being spilled due to the post-order walking of trees during code-gen. 
However, this will not be true for something like a GT_ARR_ELEM node */ SpillDsc* prev; SpillDsc* dsc; for (prev = nullptr, dsc = rsSpillDesc[reg]; dsc != nullptr; prev = dsc, dsc = dsc->spillNext) { if (dsc->spillTree == tree) { break; } } if (pPrevDsc) { *pPrevDsc = prev; } return dsc; } //------------------------------------------------------------ // rsSpillTree: Spill the tree held in 'reg'. // // Arguments: // reg - Register of tree node that is to be spilled // tree - GenTree node that is being spilled // regIdx - Register index identifying the specific result // register of a multi-reg call node. For single-reg // producing tree nodes its value is zero. // // Return Value: // None. // // Notes: // For multi-reg nodes, only the spill flag associated with this reg is cleared. // The spill flag on the node should be cleared by the caller of this method. // void RegSet::rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx /* =0 */) { assert(tree != nullptr); GenTreeCall* call = nullptr; GenTreeLclVar* lcl = nullptr; var_types treeType; #if defined(TARGET_ARM) GenTreePutArgSplit* splitArg = nullptr; GenTreeMultiRegOp* multiReg = nullptr; #endif if (tree->IsMultiRegCall()) { call = tree->AsCall(); const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc(); treeType = retTypeDesc->GetReturnRegType(regIdx); } #ifdef TARGET_ARM else if (tree->OperIsPutArgSplit()) { splitArg = tree->AsPutArgSplit(); treeType = splitArg->GetRegType(regIdx); } else if (tree->OperIsMultiRegOp()) { multiReg = tree->AsMultiRegOp(); treeType = multiReg->GetRegType(regIdx); } #endif // TARGET_ARM else if (tree->IsMultiRegLclVar()) { LclVarDsc* varDsc = m_rsCompiler->lvaGetDesc(tree->AsLclVar()); treeType = varDsc->TypeGet(); } else { treeType = tree->TypeGet(); } var_types tempType = RegSet::tmpNormalizeType(treeType); regMaskTP mask; bool floatSpill = false; if (isFloatRegType(treeType)) { floatSpill = true; mask = genRegMaskFloat(reg, treeType); } else { mask = genRegMask(reg); } rsNeededSpillReg = true; // We should only be spilling nodes marked for spill, // vars should be handled elsewhere, and to prevent // spilling twice clear GTF_SPILL flag on tree node. // // In case of multi-reg nodes, only the spill flag associated with this reg is cleared. // The spill flag on the node should be cleared by the caller of this method. assert((tree->gtFlags & GTF_SPILL) != 0); GenTreeFlags regFlags = GTF_EMPTY; if (call != nullptr) { regFlags = call->GetRegSpillFlagByIdx(regIdx); assert((regFlags & GTF_SPILL) != 0); regFlags &= ~GTF_SPILL; } #ifdef TARGET_ARM else if (splitArg != nullptr) { regFlags = splitArg->GetRegSpillFlagByIdx(regIdx); assert((regFlags & GTF_SPILL) != 0); regFlags &= ~GTF_SPILL; } else if (multiReg != nullptr) { regFlags = multiReg->GetRegSpillFlagByIdx(regIdx); assert((regFlags & GTF_SPILL) != 0); regFlags &= ~GTF_SPILL; } #endif // TARGET_ARM else if (lcl != nullptr) { regFlags = lcl->GetRegSpillFlagByIdx(regIdx); assert((regFlags & GTF_SPILL) != 0); regFlags &= ~GTF_SPILL; } else { assert(!varTypeIsMultiReg(tree)); tree->gtFlags &= ~GTF_SPILL; } #if defined(TARGET_ARM) assert(tree->GetRegNum() == reg || (call != nullptr && call->GetRegNumByIdx(regIdx) == reg) || (splitArg != nullptr && splitArg->GetRegNumByIdx(regIdx) == reg) || (multiReg != nullptr && multiReg->GetRegNumByIdx(regIdx) == reg)); #else assert(tree->GetRegNum() == reg || (call != nullptr && call->GetRegNumByIdx(regIdx) == reg)); #endif // !TARGET_ARM // Are any registers free for spillage? 
SpillDsc* spill = SpillDsc::alloc(m_rsCompiler, this, tempType); // Grab a temp to store the spilled value TempDsc* temp = tmpGetTemp(tempType); spill->spillTemp = temp; tempType = temp->tdTempType(); // Remember what it is we have spilled spill->spillTree = tree; #ifdef DEBUG if (m_rsCompiler->verbose) { printf("\t\t\t\t\t\t\tThe register %s spilled with ", m_rsCompiler->compRegVarName(reg)); Compiler::printTreeID(spill->spillTree); } #endif // 'lastDsc' is 'spill' for simple cases, and will point to the last // multi-use descriptor if 'reg' is being multi-used SpillDsc* lastDsc = spill; // Insert the spill descriptor(s) in the list lastDsc->spillNext = rsSpillDesc[reg]; rsSpillDesc[reg] = spill; #ifdef DEBUG if (m_rsCompiler->verbose) { printf("\n"); } #endif // Generate the code to spill the register var_types storeType = floatSpill ? treeType : tempType; m_rsCompiler->codeGen->spillReg(storeType, temp, reg); // Mark the tree node as having been spilled rsMarkSpill(tree, reg); // In case of multi-reg call node also mark the specific // result reg as spilled. if (call != nullptr) { regFlags |= GTF_SPILLED; call->SetRegSpillFlagByIdx(regFlags, regIdx); } #ifdef TARGET_ARM else if (splitArg != nullptr) { regFlags |= GTF_SPILLED; splitArg->SetRegSpillFlagByIdx(regFlags, regIdx); } else if (multiReg != nullptr) { regFlags |= GTF_SPILLED; multiReg->SetRegSpillFlagByIdx(regFlags, regIdx); } #endif // TARGET_ARM else if (lcl != nullptr) { regFlags |= GTF_SPILLED; lcl->SetRegSpillFlagByIdx(regFlags, regIdx); } } #if defined(TARGET_X86) /***************************************************************************** * * Spill the top of the FP x87 stack. */ void RegSet::rsSpillFPStack(GenTreeCall* call) { SpillDsc* spill; TempDsc* temp; var_types treeType = call->TypeGet(); spill = SpillDsc::alloc(m_rsCompiler, this, treeType); /* Grab a temp to store the spilled value */ spill->spillTemp = temp = tmpGetTemp(treeType); /* Remember what it is we have spilled */ spill->spillTree = call; SpillDsc* lastDsc = spill; regNumber reg = call->GetRegNum(); lastDsc->spillNext = rsSpillDesc[reg]; rsSpillDesc[reg] = spill; #ifdef DEBUG if (m_rsCompiler->verbose) printf("\n"); #endif m_rsCompiler->codeGen->GetEmitter()->emitIns_S(INS_fstp, emitActualTypeSize(treeType), temp->tdTempNum(), 0); /* Mark the tree node as having been spilled */ rsMarkSpill(call, reg); } #endif // defined(TARGET_X86) /***************************************************************************** * * Get the temp that was spilled from the given register (and free its * spill descriptor while we're at it). Returns the temp (i.e. local var) */ TempDsc* RegSet::rsGetSpillTempWord(regNumber reg, SpillDsc* dsc, SpillDsc* prevDsc) { assert((prevDsc == nullptr) || (prevDsc->spillNext == dsc)); /* Remove this spill entry from the register's list */ (prevDsc ? prevDsc->spillNext : rsSpillDesc[reg]) = dsc->spillNext; /* Remember which temp the value is in */ TempDsc* temp = dsc->spillTemp; SpillDsc::freeDsc(this, dsc); /* return the temp variable */ return temp; } //--------------------------------------------------------------------- // rsUnspillInPlace: The given tree operand has been spilled; just mark // it as unspilled so that we can use it as "normal" local. // // Arguments: // tree - GenTree that needs to be marked as unspilled. // oldReg - reg of tree that was spilled. // // Return Value: // None. // // Assumptions: // 1. It is the responsibility of the caller to free the spill temp. // 2. 
RyuJIT backend specific: In case of multi-reg call node // GTF_SPILLED flag associated with reg is cleared. It is the // responsibility of caller to clear GTF_SPILLED flag on call node // itself after ensuring there are no outstanding regs in GTF_SPILLED // state. // TempDsc* RegSet::rsUnspillInPlace(GenTree* tree, regNumber oldReg, unsigned regIdx /* =0 */) { // Get the tree's SpillDsc SpillDsc* prevDsc; SpillDsc* spillDsc = rsGetSpillInfo(tree, oldReg, &prevDsc); PREFIX_ASSUME(spillDsc != nullptr); // Get the temp TempDsc* temp = rsGetSpillTempWord(oldReg, spillDsc, prevDsc); // The value is now unspilled if (tree->IsMultiRegCall()) { GenTreeCall* call = tree->AsCall(); GenTreeFlags flags = call->GetRegSpillFlagByIdx(regIdx); flags &= ~GTF_SPILLED; call->SetRegSpillFlagByIdx(flags, regIdx); } #if defined(TARGET_ARM) else if (tree->OperIsPutArgSplit()) { GenTreePutArgSplit* splitArg = tree->AsPutArgSplit(); GenTreeFlags flags = splitArg->GetRegSpillFlagByIdx(regIdx); flags &= ~GTF_SPILLED; splitArg->SetRegSpillFlagByIdx(flags, regIdx); } else if (tree->OperIsMultiRegOp()) { GenTreeMultiRegOp* multiReg = tree->AsMultiRegOp(); GenTreeFlags flags = multiReg->GetRegSpillFlagByIdx(regIdx); flags &= ~GTF_SPILLED; multiReg->SetRegSpillFlagByIdx(flags, regIdx); } #endif // TARGET_ARM else if (tree->IsMultiRegLclVar()) { GenTreeLclVar* lcl = tree->AsLclVar(); GenTreeFlags flags = lcl->GetRegSpillFlagByIdx(regIdx); flags &= ~GTF_SPILLED; lcl->SetRegSpillFlagByIdx(flags, regIdx); } else { tree->gtFlags &= ~GTF_SPILLED; } #ifdef DEBUG if (m_rsCompiler->verbose) { printf("\t\t\t\t\t\t\tTree-Node marked unspilled from "); Compiler::printTreeID(tree); printf("\n"); } #endif return temp; } void RegSet::rsMarkSpill(GenTree* tree, regNumber reg) { tree->gtFlags |= GTF_SPILLED; } /*****************************************************************************/ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX TempsInfo XX XX XX XX The temporary lclVars allocated by the compiler for code generation XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ void RegSet::tmpInit() { tmpCount = 0; tmpSize = UINT_MAX; #ifdef DEBUG tmpGetCount = 0; #endif memset(tmpFree, 0, sizeof(tmpFree)); memset(tmpUsed, 0, sizeof(tmpUsed)); } /* static */ var_types RegSet::tmpNormalizeType(var_types type) { type = genActualType(type); #if defined(FEATURE_SIMD) // We always spill SIMD12 to a 16-byte SIMD16 temp. // This is because we don't have a single instruction to store 12 bytes, so we want // to ensure that we always have the full 16 bytes for loading & storing the value. // We also allocate non-argument locals as 16 bytes; see lvSize(). if (type == TYP_SIMD12) { type = TYP_SIMD16; } #endif // defined(FEATURE_SIMD) && !defined(TARGET_64BIT) return type; } /***************************************************************************** * * Allocate a temp of the given size (and type, if tracking pointers for * the garbage collector). 
*/ TempDsc* RegSet::tmpGetTemp(var_types type) { type = tmpNormalizeType(type); unsigned size = genTypeSize(type); // If TYP_STRUCT ever gets in here we do bad things (tmpSlot returns -1) noway_assert(size >= sizeof(int)); /* Find the slot to search for a free temp of the right size */ unsigned slot = tmpSlot(size); /* Look for a temp with a matching type */ TempDsc** last = &tmpFree[slot]; TempDsc* temp; for (temp = *last; temp; last = &temp->tdNext, temp = *last) { /* Does the type match? */ if (temp->tdTempType() == type) { /* We have a match -- remove it from the free list */ *last = temp->tdNext; break; } } #ifdef DEBUG /* Do we need to allocate a new temp */ bool isNewTemp = false; #endif // DEBUG noway_assert(temp != nullptr); #ifdef DEBUG if (m_rsCompiler->verbose) { printf("%s temp #%u, slot %u, size = %u\n", isNewTemp ? "created" : "reused", -temp->tdTempNum(), slot, temp->tdTempSize()); } tmpGetCount++; #endif // DEBUG temp->tdNext = tmpUsed[slot]; tmpUsed[slot] = temp; return temp; } /***************************************************************************** * Preallocate 'count' temps of type 'type'. This type must be a normalized * type (by the definition of tmpNormalizeType()). * * This is used at the end of LSRA, which knows precisely the maximum concurrent * number of each type of spill temp needed, before code generation. Code generation * then uses these preallocated temp. If code generation ever asks for more than * has been preallocated, it is a fatal error. */ void RegSet::tmpPreAllocateTemps(var_types type, unsigned count) { assert(type == tmpNormalizeType(type)); unsigned size = genTypeSize(type); // If TYP_STRUCT ever gets in here we do bad things (tmpSlot returns -1) noway_assert(size >= sizeof(int)); // Find the slot to search for a free temp of the right size. // Note that slots are shared by types of the identical size (e.g., TYP_REF and TYP_LONG on AMD64), // so we can't assert that the slot is empty when we get here. unsigned slot = tmpSlot(size); for (unsigned i = 0; i < count; i++) { tmpCount++; tmpSize += size; #ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Adjust tmpSize to accommodate possible alignment padding. // Note that at this point the offsets aren't yet finalized, so we don't yet know if it will be required. tmpSize += TARGET_POINTER_SIZE; } #endif // TARGET_ARM TempDsc* temp = new (m_rsCompiler, CMK_Unknown) TempDsc(-((int)tmpCount), size, type); #ifdef DEBUG if (m_rsCompiler->verbose) { printf("pre-allocated temp #%u, slot %u, size = %u\n", -temp->tdTempNum(), slot, temp->tdTempSize()); } #endif // DEBUG // Add it to the front of the appropriate slot list. temp->tdNext = tmpFree[slot]; tmpFree[slot] = temp; } } /***************************************************************************** * * Release the given temp. */ void RegSet::tmpRlsTemp(TempDsc* temp) { assert(temp != nullptr); unsigned slot; /* Add the temp to the 'free' list */ slot = tmpSlot(temp->tdTempSize()); #ifdef DEBUG if (m_rsCompiler->verbose) { printf("release temp #%u, slot %u, size = %u\n", -temp->tdTempNum(), slot, temp->tdTempSize()); } assert(tmpGetCount); tmpGetCount--; #endif // Remove it from the 'used' list. TempDsc** last = &tmpUsed[slot]; TempDsc* t; for (t = *last; t != nullptr; last = &t->tdNext, t = *last) { if (t == temp) { /* Found it! -- remove it from the 'used' list */ *last = t->tdNext; break; } } assert(t != nullptr); // We better have found it! // Add it to the free list. 
temp->tdNext = tmpFree[slot]; tmpFree[slot] = temp; } /***************************************************************************** * Given a temp number, find the corresponding temp. * * When looking for temps on the "free" list, this can only be used after code generation. (This is * simply because we have an assert to that effect in tmpListBeg(); we could relax that, or hoist * the assert to the appropriate callers.) * * When looking for temps on the "used" list, this can be used any time. */ TempDsc* RegSet::tmpFindNum(int tnum, TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE */) const { assert(tnum < 0); // temp numbers are negative for (TempDsc* temp = tmpListBeg(usageType); temp != nullptr; temp = tmpListNxt(temp, usageType)) { if (temp->tdTempNum() == tnum) { return temp; } } return nullptr; } /***************************************************************************** * * A helper function is used to iterate over all the temps. */ TempDsc* RegSet::tmpListBeg(TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE */) const { TempDsc* const* tmpLists; if (usageType == TEMP_USAGE_FREE) { tmpLists = tmpFree; } else { tmpLists = tmpUsed; } // Return the first temp in the slot for the smallest size unsigned slot = 0; while (slot < (TEMP_SLOT_COUNT - 1) && tmpLists[slot] == nullptr) { slot++; } TempDsc* temp = tmpLists[slot]; return temp; } /***************************************************************************** * Used with tmpListBeg() to iterate over the list of temps. */ TempDsc* RegSet::tmpListNxt(TempDsc* curTemp, TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE */) const { assert(curTemp != nullptr); TempDsc* temp = curTemp->tdNext; if (temp == nullptr) { unsigned size = curTemp->tdTempSize(); // If there are no more temps in the list, check if there are more // slots (for bigger sized temps) to walk. TempDsc* const* tmpLists; if (usageType == TEMP_USAGE_FREE) { tmpLists = tmpFree; } else { tmpLists = tmpUsed; } while (size < TEMP_MAX_SIZE && temp == nullptr) { size += sizeof(int); unsigned slot = tmpSlot(size); temp = tmpLists[slot]; } assert((temp == nullptr) || (temp->tdTempSize() == size)); } return temp; } #ifdef DEBUG /***************************************************************************** * Return 'true' if all allocated temps are free (not in use). */ bool RegSet::tmpAllFree() const { // The 'tmpGetCount' should equal the number of things in the 'tmpUsed' lists. This is a convenient place // to assert that. 
unsigned usedCount = 0; for (TempDsc* temp = tmpListBeg(TEMP_USAGE_USED); temp != nullptr; temp = tmpListNxt(temp, TEMP_USAGE_USED)) { ++usedCount; } assert(usedCount == tmpGetCount); if (tmpGetCount != 0) { return false; } for (unsigned i = 0; i < ArrLen(tmpUsed); i++) { if (tmpUsed[i] != nullptr) { return false; } } return true; } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Register-related utility functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /***************************************************************************** * * Given a register that is an argument register * returns the next argument register * * Note: that this method will return a non arg register * when given REG_ARG_LAST * */ regNumber genRegArgNext(regNumber argReg) { assert(isValidIntArgReg(argReg) || isValidFloatArgReg(argReg)); switch (argReg) { #ifdef TARGET_AMD64 #ifdef UNIX_AMD64_ABI // Linux x64 ABI: REG_RDI, REG_RSI, REG_RDX, REG_RCX, REG_R8, REG_R9 case REG_ARG_0: // REG_RDI return REG_ARG_1; // REG_RSI case REG_ARG_1: // REG_RSI return REG_ARG_2; // REG_RDX case REG_ARG_2: // REG_RDX return REG_ARG_3; // REG_RCX case REG_ARG_3: // REG_RCX return REG_ARG_4; // REG_R8 #else // !UNIX_AMD64_ABI // Windows x64 ABI: REG_RCX, REG_RDX, REG_R8, REG_R9 case REG_ARG_1: // REG_RDX return REG_ARG_2; // REG_R8 #endif // !UNIX_AMD64_ABI #endif // TARGET_AMD64 default: return REG_NEXT(argReg); } } /***************************************************************************** * * The following table determines the order in which callee-saved registers * are encoded in GC information at call sites (perhaps among other things). * In any case, they establish a mapping from ordinal callee-save reg "indices" to * register numbers and corresponding bitmaps. */ const regNumber raRegCalleeSaveOrder[] = {REG_CALLEE_SAVED_ORDER}; const regMaskTP raRbmCalleeSaveOrder[] = {RBM_CALLEE_SAVED_ORDER}; regMaskSmall genRegMaskFromCalleeSavedMask(unsigned short calleeSaveMask) { regMaskSmall res = 0; for (int i = 0; i < CNT_CALLEE_SAVED; i++) { if ((calleeSaveMask & ((regMaskTP)1 << i)) != 0) { res |= raRbmCalleeSaveOrder[i]; } } return res; } /***************************************************************************** * * Initializes the spill code. Should be called once per function compiled. */ // inline void RegSet::rsSpillInit() { /* Clear out the spill and multi-use tables */ memset(rsSpillDesc, 0, sizeof(rsSpillDesc)); rsNeededSpillReg = false; /* We don't have any descriptors allocated */ rsSpillFree = nullptr; } /***************************************************************************** * * Shuts down the spill code. Should be called once per function compiled. */ // inline void RegSet::rsSpillDone() { rsSpillChk(); } /***************************************************************************** * * Begin tracking spills - should be called each time before a pass is made * over a function body. */ // inline void RegSet::rsSpillBeg() { rsSpillChk(); } /***************************************************************************** * * Finish tracking spills - should be called each time after a pass is made * over a function body. 
*/ // inline void RegSet::rsSpillEnd() { rsSpillChk(); } //**************************************************************************** // Create a new SpillDsc or get one off the free list // // inline RegSet::SpillDsc* RegSet::SpillDsc::alloc(Compiler* pComp, RegSet* regSet, var_types type) { RegSet::SpillDsc* spill; RegSet::SpillDsc** pSpill; pSpill = &(regSet->rsSpillFree); // Allocate spill structure if (*pSpill) { spill = *pSpill; *pSpill = spill->spillNext; } else { spill = pComp->getAllocator().allocate<SpillDsc>(1); } return spill; } //**************************************************************************** // Free a SpillDsc and return it to the rsSpillFree list // // inline void RegSet::SpillDsc::freeDsc(RegSet* regSet, RegSet::SpillDsc* spillDsc) { spillDsc->spillNext = regSet->rsSpillFree; regSet->rsSpillFree = spillDsc; } /***************************************************************************** * * Make sure no spills are currently active - used for debugging of the code * generator. */ #ifdef DEBUG // inline void RegSet::rsSpillChk() { // All grabbed temps should have been released assert(tmpGetCount == 0); for (regNumber reg = REG_FIRST; reg < REG_COUNT; reg = REG_NEXT(reg)) { assert(rsSpillDesc[reg] == nullptr); } } #else // inline void RegSet::rsSpillChk() { } #endif
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX RegSet XX XX XX XX Represents the register set, and their states during code generation XX XX Can select an unused register, keeps track of the contents of the XX XX registers, and can spill registers XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ #include "jitpch.h" #ifdef _MSC_VER #pragma hdrstop #endif #include "emit.h" /*****************************************************************************/ #ifdef TARGET_ARM64 const regMaskSmall regMasks[] = { #define REGDEF(name, rnum, mask, xname, wname) mask, #include "register.h" }; #else // !TARGET_ARM64 const regMaskSmall regMasks[] = { #define REGDEF(name, rnum, mask, sname) mask, #include "register.h" }; #endif /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX RegSet XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ //------------------------------------------------------------------------ // verifyRegUsed: verify that the register is marked as used. // // Arguments: // reg - The register to verify. // // Return Value: // None. // // Assumptions: // The caller must have ensured that the register is already marked // as used. // // Notes: // This method is intended to be called during code generation, and // should simply validate that the register (or registers) have // already been added to the modified set. void RegSet::verifyRegUsed(regNumber reg) { // TODO-Cleanup: we need to identify the places where the register // is not marked as used when this is called. rsSetRegsModified(genRegMask(reg)); } //------------------------------------------------------------------------ // verifyRegistersUsed: verify that the registers are marked as used. // // Arguments: // regs - The registers to verify. // // Return Value: // None. // // Assumptions: // The caller must have ensured that the registers are already marked // as used. // // Notes: // This method is intended to be called during code generation, and // should simply validate that the register (or registers) have // already been added to the modified set. void RegSet::verifyRegistersUsed(regMaskTP regMask) { if (m_rsCompiler->opts.OptimizationDisabled()) { return; } if (regMask == RBM_NONE) { return; } // TODO-Cleanup: we need to identify the places where the registers // are not marked as used when this is called. rsSetRegsModified(regMask); } void RegSet::rsClearRegsModified() { assert(m_rsCompiler->lvaDoneFrameLayout < Compiler::FINAL_FRAME_LAYOUT); #ifdef DEBUG if (m_rsCompiler->verbose) { printf("Clearing modified regs.\n"); } rsModifiedRegsMaskInitialized = true; #endif // DEBUG rsModifiedRegsMask = RBM_NONE; } void RegSet::rsSetRegsModified(regMaskTP mask DEBUGARG(bool suppressDump)) { assert(mask != RBM_NONE); assert(rsModifiedRegsMaskInitialized); // We can't update the modified registers set after final frame layout (that is, during code // generation and after). 
Ignore prolog and epilog generation: they call register tracking to // modify rbp, for example, even in functions that use rbp as a frame pointer. Make sure normal // code generation isn't actually adding to set of modified registers. // Frame layout is only affected by callee-saved registers, so only ensure that callee-saved // registers aren't modified after final frame layout. assert((m_rsCompiler->lvaDoneFrameLayout < Compiler::FINAL_FRAME_LAYOUT) || m_rsCompiler->compGeneratingProlog || m_rsCompiler->compGeneratingEpilog || (((rsModifiedRegsMask | mask) & RBM_CALLEE_SAVED) == (rsModifiedRegsMask & RBM_CALLEE_SAVED))); #ifdef DEBUG if (m_rsCompiler->verbose && !suppressDump) { if (rsModifiedRegsMask != (rsModifiedRegsMask | mask)) { printf("Marking regs modified: "); dspRegMask(mask); printf(" ("); dspRegMask(rsModifiedRegsMask); printf(" => "); dspRegMask(rsModifiedRegsMask | mask); printf(")\n"); } } #endif // DEBUG rsModifiedRegsMask |= mask; } void RegSet::rsRemoveRegsModified(regMaskTP mask) { assert(mask != RBM_NONE); assert(rsModifiedRegsMaskInitialized); // See comment in rsSetRegsModified(). assert((m_rsCompiler->lvaDoneFrameLayout < Compiler::FINAL_FRAME_LAYOUT) || m_rsCompiler->compGeneratingProlog || m_rsCompiler->compGeneratingEpilog || (((rsModifiedRegsMask & ~mask) & RBM_CALLEE_SAVED) == (rsModifiedRegsMask & RBM_CALLEE_SAVED))); #ifdef DEBUG if (m_rsCompiler->verbose) { printf("Removing modified regs: "); dspRegMask(mask); if (rsModifiedRegsMask == (rsModifiedRegsMask & ~mask)) { printf(" (unchanged)"); } else { printf(" ("); dspRegMask(rsModifiedRegsMask); printf(" => "); dspRegMask(rsModifiedRegsMask & ~mask); printf(")"); } printf("\n"); } #endif // DEBUG rsModifiedRegsMask &= ~mask; } void RegSet::SetMaskVars(regMaskTP newMaskVars) { #ifdef DEBUG if (m_rsCompiler->verbose) { printf("\t\t\t\t\t\t\tLive regs: "); if (_rsMaskVars == newMaskVars) { printf("(unchanged) "); } else { printRegMaskInt(_rsMaskVars); m_rsCompiler->GetEmitter()->emitDispRegSet(_rsMaskVars); printf(" => "); } printRegMaskInt(newMaskVars); m_rsCompiler->GetEmitter()->emitDispRegSet(newMaskVars); printf("\n"); } #endif // DEBUG _rsMaskVars = newMaskVars; } /*****************************************************************************/ RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo) : m_rsCompiler(compiler), m_rsGCInfo(gcInfo) { /* Initialize the spill logic */ rsSpillInit(); /* Initialize the argument register count */ // TODO-Cleanup: Consider moving intRegState and floatRegState to RegSet. They used // to be initialized here, but are now initialized in the CodeGen constructor. // intRegState.rsCurRegArgNum = 0; // loatRegState.rsCurRegArgNum = 0; rsMaskResvd = RBM_NONE; #ifdef TARGET_ARMARCH rsMaskCalleeSaved = RBM_NONE; #endif // TARGET_ARMARCH #ifdef TARGET_ARM rsMaskPreSpillRegArg = RBM_NONE; rsMaskPreSpillAlign = RBM_NONE; #endif #ifdef DEBUG rsModifiedRegsMaskInitialized = false; #endif // DEBUG } /***************************************************************************** * * Finds the SpillDsc corresponding to 'tree' assuming it was spilled from 'reg'. */ RegSet::SpillDsc* RegSet::rsGetSpillInfo(GenTree* tree, regNumber reg, SpillDsc** pPrevDsc) { /* Normally, trees are unspilled in the order of being spilled due to the post-order walking of trees during code-gen. 
However, this will not be true for something like a GT_ARR_ELEM node */ SpillDsc* prev; SpillDsc* dsc; for (prev = nullptr, dsc = rsSpillDesc[reg]; dsc != nullptr; prev = dsc, dsc = dsc->spillNext) { if (dsc->spillTree == tree) { break; } } if (pPrevDsc) { *pPrevDsc = prev; } return dsc; } //------------------------------------------------------------ // rsSpillTree: Spill the tree held in 'reg'. // // Arguments: // reg - Register of tree node that is to be spilled // tree - GenTree node that is being spilled // regIdx - Register index identifying the specific result // register of a multi-reg call node. For single-reg // producing tree nodes its value is zero. // // Return Value: // None. // // Notes: // For multi-reg nodes, only the spill flag associated with this reg is cleared. // The spill flag on the node should be cleared by the caller of this method. // void RegSet::rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx /* =0 */) { assert(tree != nullptr); GenTreeCall* call = nullptr; GenTreeLclVar* lcl = nullptr; var_types treeType; #if defined(TARGET_ARM) GenTreePutArgSplit* splitArg = nullptr; GenTreeMultiRegOp* multiReg = nullptr; #endif if (tree->IsMultiRegCall()) { call = tree->AsCall(); const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc(); treeType = retTypeDesc->GetReturnRegType(regIdx); } #ifdef TARGET_ARM else if (tree->OperIsPutArgSplit()) { splitArg = tree->AsPutArgSplit(); treeType = splitArg->GetRegType(regIdx); } else if (tree->OperIsMultiRegOp()) { multiReg = tree->AsMultiRegOp(); treeType = multiReg->GetRegType(regIdx); } #endif // TARGET_ARM else if (tree->IsMultiRegLclVar()) { LclVarDsc* varDsc = m_rsCompiler->lvaGetDesc(tree->AsLclVar()); treeType = varDsc->TypeGet(); } else { treeType = tree->TypeGet(); } var_types tempType = RegSet::tmpNormalizeType(treeType); regMaskTP mask; bool floatSpill = false; if (isFloatRegType(treeType)) { floatSpill = true; mask = genRegMaskFloat(reg, treeType); } else { mask = genRegMask(reg); } rsNeededSpillReg = true; // We should only be spilling nodes marked for spill, // vars should be handled elsewhere, and to prevent // spilling twice clear GTF_SPILL flag on tree node. // // In case of multi-reg nodes, only the spill flag associated with this reg is cleared. // The spill flag on the node should be cleared by the caller of this method. assert((tree->gtFlags & GTF_SPILL) != 0); GenTreeFlags regFlags = GTF_EMPTY; if (call != nullptr) { regFlags = call->GetRegSpillFlagByIdx(regIdx); assert((regFlags & GTF_SPILL) != 0); regFlags &= ~GTF_SPILL; } #ifdef TARGET_ARM else if (splitArg != nullptr) { regFlags = splitArg->GetRegSpillFlagByIdx(regIdx); assert((regFlags & GTF_SPILL) != 0); regFlags &= ~GTF_SPILL; } else if (multiReg != nullptr) { regFlags = multiReg->GetRegSpillFlagByIdx(regIdx); assert((regFlags & GTF_SPILL) != 0); regFlags &= ~GTF_SPILL; } #endif // TARGET_ARM else if (lcl != nullptr) { regFlags = lcl->GetRegSpillFlagByIdx(regIdx); assert((regFlags & GTF_SPILL) != 0); regFlags &= ~GTF_SPILL; } else { assert(!varTypeIsMultiReg(tree)); tree->gtFlags &= ~GTF_SPILL; } #if defined(TARGET_ARM) assert(tree->GetRegNum() == reg || (call != nullptr && call->GetRegNumByIdx(regIdx) == reg) || (splitArg != nullptr && splitArg->GetRegNumByIdx(regIdx) == reg) || (multiReg != nullptr && multiReg->GetRegNumByIdx(regIdx) == reg)); #else assert(tree->GetRegNum() == reg || (call != nullptr && call->GetRegNumByIdx(regIdx) == reg)); #endif // !TARGET_ARM // Are any registers free for spillage? 
SpillDsc* spill = SpillDsc::alloc(m_rsCompiler, this, tempType); // Grab a temp to store the spilled value TempDsc* temp = tmpGetTemp(tempType); spill->spillTemp = temp; tempType = temp->tdTempType(); // Remember what it is we have spilled spill->spillTree = tree; #ifdef DEBUG if (m_rsCompiler->verbose) { printf("\t\t\t\t\t\t\tThe register %s spilled with ", m_rsCompiler->compRegVarName(reg)); Compiler::printTreeID(spill->spillTree); } #endif // 'lastDsc' is 'spill' for simple cases, and will point to the last // multi-use descriptor if 'reg' is being multi-used SpillDsc* lastDsc = spill; // Insert the spill descriptor(s) in the list lastDsc->spillNext = rsSpillDesc[reg]; rsSpillDesc[reg] = spill; #ifdef DEBUG if (m_rsCompiler->verbose) { printf("\n"); } #endif // Generate the code to spill the register var_types storeType = floatSpill ? treeType : tempType; m_rsCompiler->codeGen->spillReg(storeType, temp, reg); // Mark the tree node as having been spilled rsMarkSpill(tree, reg); // In case of multi-reg call node also mark the specific // result reg as spilled. if (call != nullptr) { regFlags |= GTF_SPILLED; call->SetRegSpillFlagByIdx(regFlags, regIdx); } #ifdef TARGET_ARM else if (splitArg != nullptr) { regFlags |= GTF_SPILLED; splitArg->SetRegSpillFlagByIdx(regFlags, regIdx); } else if (multiReg != nullptr) { regFlags |= GTF_SPILLED; multiReg->SetRegSpillFlagByIdx(regFlags, regIdx); } #endif // TARGET_ARM else if (lcl != nullptr) { regFlags |= GTF_SPILLED; lcl->SetRegSpillFlagByIdx(regFlags, regIdx); } } #if defined(TARGET_X86) /***************************************************************************** * * Spill the top of the FP x87 stack. */ void RegSet::rsSpillFPStack(GenTreeCall* call) { SpillDsc* spill; TempDsc* temp; var_types treeType = call->TypeGet(); spill = SpillDsc::alloc(m_rsCompiler, this, treeType); /* Grab a temp to store the spilled value */ spill->spillTemp = temp = tmpGetTemp(treeType); /* Remember what it is we have spilled */ spill->spillTree = call; SpillDsc* lastDsc = spill; regNumber reg = call->GetRegNum(); lastDsc->spillNext = rsSpillDesc[reg]; rsSpillDesc[reg] = spill; #ifdef DEBUG if (m_rsCompiler->verbose) printf("\n"); #endif m_rsCompiler->codeGen->GetEmitter()->emitIns_S(INS_fstp, emitActualTypeSize(treeType), temp->tdTempNum(), 0); /* Mark the tree node as having been spilled */ rsMarkSpill(call, reg); } #endif // defined(TARGET_X86) /***************************************************************************** * * Get the temp that was spilled from the given register (and free its * spill descriptor while we're at it). Returns the temp (i.e. local var) */ TempDsc* RegSet::rsGetSpillTempWord(regNumber reg, SpillDsc* dsc, SpillDsc* prevDsc) { assert((prevDsc == nullptr) || (prevDsc->spillNext == dsc)); /* Remove this spill entry from the register's list */ (prevDsc ? prevDsc->spillNext : rsSpillDesc[reg]) = dsc->spillNext; /* Remember which temp the value is in */ TempDsc* temp = dsc->spillTemp; SpillDsc::freeDsc(this, dsc); /* return the temp variable */ return temp; } //--------------------------------------------------------------------- // rsUnspillInPlace: The given tree operand has been spilled; just mark // it as unspilled so that we can use it as "normal" local. // // Arguments: // tree - GenTree that needs to be marked as unspilled. // oldReg - reg of tree that was spilled. // // Return Value: // None. // // Assumptions: // 1. It is the responsibility of the caller to free the spill temp. // 2. 
RyuJIT backend specific: In case of multi-reg call node // GTF_SPILLED flag associated with reg is cleared. It is the // responsibility of caller to clear GTF_SPILLED flag on call node // itself after ensuring there are no outstanding regs in GTF_SPILLED // state. // TempDsc* RegSet::rsUnspillInPlace(GenTree* tree, regNumber oldReg, unsigned regIdx /* =0 */) { // Get the tree's SpillDsc SpillDsc* prevDsc; SpillDsc* spillDsc = rsGetSpillInfo(tree, oldReg, &prevDsc); PREFIX_ASSUME(spillDsc != nullptr); // Get the temp TempDsc* temp = rsGetSpillTempWord(oldReg, spillDsc, prevDsc); // The value is now unspilled if (tree->IsMultiRegCall()) { GenTreeCall* call = tree->AsCall(); GenTreeFlags flags = call->GetRegSpillFlagByIdx(regIdx); flags &= ~GTF_SPILLED; call->SetRegSpillFlagByIdx(flags, regIdx); } #if defined(TARGET_ARM) else if (tree->OperIsPutArgSplit()) { GenTreePutArgSplit* splitArg = tree->AsPutArgSplit(); GenTreeFlags flags = splitArg->GetRegSpillFlagByIdx(regIdx); flags &= ~GTF_SPILLED; splitArg->SetRegSpillFlagByIdx(flags, regIdx); } else if (tree->OperIsMultiRegOp()) { GenTreeMultiRegOp* multiReg = tree->AsMultiRegOp(); GenTreeFlags flags = multiReg->GetRegSpillFlagByIdx(regIdx); flags &= ~GTF_SPILLED; multiReg->SetRegSpillFlagByIdx(flags, regIdx); } #endif // TARGET_ARM else if (tree->IsMultiRegLclVar()) { GenTreeLclVar* lcl = tree->AsLclVar(); GenTreeFlags flags = lcl->GetRegSpillFlagByIdx(regIdx); flags &= ~GTF_SPILLED; lcl->SetRegSpillFlagByIdx(flags, regIdx); } else { tree->gtFlags &= ~GTF_SPILLED; } #ifdef DEBUG if (m_rsCompiler->verbose) { printf("\t\t\t\t\t\t\tTree-Node marked unspilled from "); Compiler::printTreeID(tree); printf("\n"); } #endif return temp; } void RegSet::rsMarkSpill(GenTree* tree, regNumber reg) { tree->gtFlags |= GTF_SPILLED; } /*****************************************************************************/ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX TempsInfo XX XX XX XX The temporary lclVars allocated by the compiler for code generation XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ void RegSet::tmpInit() { tmpCount = 0; tmpSize = UINT_MAX; #ifdef DEBUG tmpGetCount = 0; #endif memset(tmpFree, 0, sizeof(tmpFree)); memset(tmpUsed, 0, sizeof(tmpUsed)); } /* static */ var_types RegSet::tmpNormalizeType(var_types type) { type = genActualType(type); #if defined(FEATURE_SIMD) // We always spill SIMD12 to a 16-byte SIMD16 temp. // This is because we don't have a single instruction to store 12 bytes, so we want // to ensure that we always have the full 16 bytes for loading & storing the value. // We also allocate non-argument locals as 16 bytes; see lvSize(). if (type == TYP_SIMD12) { type = TYP_SIMD16; } #endif // defined(FEATURE_SIMD) && !defined(TARGET_64BIT) return type; } /***************************************************************************** * * Allocate a temp of the given size (and type, if tracking pointers for * the garbage collector). 
*/ TempDsc* RegSet::tmpGetTemp(var_types type) { type = tmpNormalizeType(type); unsigned size = genTypeSize(type); // If TYP_STRUCT ever gets in here we do bad things (tmpSlot returns -1) noway_assert(size >= sizeof(int)); /* Find the slot to search for a free temp of the right size */ unsigned slot = tmpSlot(size); /* Look for a temp with a matching type */ TempDsc** last = &tmpFree[slot]; TempDsc* temp; for (temp = *last; temp; last = &temp->tdNext, temp = *last) { /* Does the type match? */ if (temp->tdTempType() == type) { /* We have a match -- remove it from the free list */ *last = temp->tdNext; break; } } #ifdef DEBUG /* Do we need to allocate a new temp */ bool isNewTemp = false; #endif // DEBUG noway_assert(temp != nullptr); #ifdef DEBUG if (m_rsCompiler->verbose) { printf("%s temp #%u, slot %u, size = %u\n", isNewTemp ? "created" : "reused", -temp->tdTempNum(), slot, temp->tdTempSize()); } tmpGetCount++; #endif // DEBUG temp->tdNext = tmpUsed[slot]; tmpUsed[slot] = temp; return temp; } /***************************************************************************** * Preallocate 'count' temps of type 'type'. This type must be a normalized * type (by the definition of tmpNormalizeType()). * * This is used at the end of LSRA, which knows precisely the maximum concurrent * number of each type of spill temp needed, before code generation. Code generation * then uses these preallocated temp. If code generation ever asks for more than * has been preallocated, it is a fatal error. */ void RegSet::tmpPreAllocateTemps(var_types type, unsigned count) { assert(type == tmpNormalizeType(type)); unsigned size = genTypeSize(type); // If TYP_STRUCT ever gets in here we do bad things (tmpSlot returns -1) noway_assert(size >= sizeof(int)); // Find the slot to search for a free temp of the right size. // Note that slots are shared by types of the identical size (e.g., TYP_REF and TYP_LONG on AMD64), // so we can't assert that the slot is empty when we get here. unsigned slot = tmpSlot(size); for (unsigned i = 0; i < count; i++) { tmpCount++; tmpSize += size; #ifdef TARGET_ARM if (type == TYP_DOUBLE) { // Adjust tmpSize to accommodate possible alignment padding. // Note that at this point the offsets aren't yet finalized, so we don't yet know if it will be required. tmpSize += TARGET_POINTER_SIZE; } #endif // TARGET_ARM TempDsc* temp = new (m_rsCompiler, CMK_Unknown) TempDsc(-((int)tmpCount), size, type); #ifdef DEBUG if (m_rsCompiler->verbose) { printf("pre-allocated temp #%u, slot %u, size = %u\n", -temp->tdTempNum(), slot, temp->tdTempSize()); } #endif // DEBUG // Add it to the front of the appropriate slot list. temp->tdNext = tmpFree[slot]; tmpFree[slot] = temp; } } /***************************************************************************** * * Release the given temp. */ void RegSet::tmpRlsTemp(TempDsc* temp) { assert(temp != nullptr); unsigned slot; /* Add the temp to the 'free' list */ slot = tmpSlot(temp->tdTempSize()); #ifdef DEBUG if (m_rsCompiler->verbose) { printf("release temp #%u, slot %u, size = %u\n", -temp->tdTempNum(), slot, temp->tdTempSize()); } assert(tmpGetCount); tmpGetCount--; #endif // Remove it from the 'used' list. TempDsc** last = &tmpUsed[slot]; TempDsc* t; for (t = *last; t != nullptr; last = &t->tdNext, t = *last) { if (t == temp) { /* Found it! -- remove it from the 'used' list */ *last = t->tdNext; break; } } assert(t != nullptr); // We better have found it! // Add it to the free list. 
temp->tdNext = tmpFree[slot]; tmpFree[slot] = temp; } /***************************************************************************** * Given a temp number, find the corresponding temp. * * When looking for temps on the "free" list, this can only be used after code generation. (This is * simply because we have an assert to that effect in tmpListBeg(); we could relax that, or hoist * the assert to the appropriate callers.) * * When looking for temps on the "used" list, this can be used any time. */ TempDsc* RegSet::tmpFindNum(int tnum, TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE */) const { assert(tnum < 0); // temp numbers are negative for (TempDsc* temp = tmpListBeg(usageType); temp != nullptr; temp = tmpListNxt(temp, usageType)) { if (temp->tdTempNum() == tnum) { return temp; } } return nullptr; } /***************************************************************************** * * A helper function is used to iterate over all the temps. */ TempDsc* RegSet::tmpListBeg(TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE */) const { TempDsc* const* tmpLists; if (usageType == TEMP_USAGE_FREE) { tmpLists = tmpFree; } else { tmpLists = tmpUsed; } // Return the first temp in the slot for the smallest size unsigned slot = 0; while (slot < (TEMP_SLOT_COUNT - 1) && tmpLists[slot] == nullptr) { slot++; } TempDsc* temp = tmpLists[slot]; return temp; } /***************************************************************************** * Used with tmpListBeg() to iterate over the list of temps. */ TempDsc* RegSet::tmpListNxt(TempDsc* curTemp, TEMP_USAGE_TYPE usageType /* = TEMP_USAGE_FREE */) const { assert(curTemp != nullptr); TempDsc* temp = curTemp->tdNext; if (temp == nullptr) { unsigned size = curTemp->tdTempSize(); // If there are no more temps in the list, check if there are more // slots (for bigger sized temps) to walk. TempDsc* const* tmpLists; if (usageType == TEMP_USAGE_FREE) { tmpLists = tmpFree; } else { tmpLists = tmpUsed; } while (size < TEMP_MAX_SIZE && temp == nullptr) { size += sizeof(int); unsigned slot = tmpSlot(size); temp = tmpLists[slot]; } assert((temp == nullptr) || (temp->tdTempSize() == size)); } return temp; } #ifdef DEBUG /***************************************************************************** * Return 'true' if all allocated temps are free (not in use). */ bool RegSet::tmpAllFree() const { // The 'tmpGetCount' should equal the number of things in the 'tmpUsed' lists. This is a convenient place // to assert that. 
unsigned usedCount = 0; for (TempDsc* temp = tmpListBeg(TEMP_USAGE_USED); temp != nullptr; temp = tmpListNxt(temp, TEMP_USAGE_USED)) { ++usedCount; } assert(usedCount == tmpGetCount); if (tmpGetCount != 0) { return false; } for (unsigned i = 0; i < ArrLen(tmpUsed); i++) { if (tmpUsed[i] != nullptr) { return false; } } return true; } #endif // DEBUG /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XX XX Register-related utility functions XX XX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ /***************************************************************************** * * Given a register that is an argument register * returns the next argument register * * Note: that this method will return a non arg register * when given REG_ARG_LAST * */ regNumber genRegArgNext(regNumber argReg) { assert(isValidIntArgReg(argReg) || isValidFloatArgReg(argReg)); switch (argReg) { #ifdef TARGET_AMD64 #ifdef UNIX_AMD64_ABI // Linux x64 ABI: REG_RDI, REG_RSI, REG_RDX, REG_RCX, REG_R8, REG_R9 case REG_ARG_0: // REG_RDI return REG_ARG_1; // REG_RSI case REG_ARG_1: // REG_RSI return REG_ARG_2; // REG_RDX case REG_ARG_2: // REG_RDX return REG_ARG_3; // REG_RCX case REG_ARG_3: // REG_RCX return REG_ARG_4; // REG_R8 #else // !UNIX_AMD64_ABI // Windows x64 ABI: REG_RCX, REG_RDX, REG_R8, REG_R9 case REG_ARG_1: // REG_RDX return REG_ARG_2; // REG_R8 #endif // !UNIX_AMD64_ABI #endif // TARGET_AMD64 default: return REG_NEXT(argReg); } } /***************************************************************************** * * The following table determines the order in which callee-saved registers * are encoded in GC information at call sites (perhaps among other things). * In any case, they establish a mapping from ordinal callee-save reg "indices" to * register numbers and corresponding bitmaps. */ const regNumber raRegCalleeSaveOrder[] = {REG_CALLEE_SAVED_ORDER}; const regMaskTP raRbmCalleeSaveOrder[] = {RBM_CALLEE_SAVED_ORDER}; regMaskSmall genRegMaskFromCalleeSavedMask(unsigned short calleeSaveMask) { regMaskSmall res = 0; for (int i = 0; i < CNT_CALLEE_SAVED; i++) { if ((calleeSaveMask & ((regMaskTP)1 << i)) != 0) { res |= raRbmCalleeSaveOrder[i]; } } return res; } /***************************************************************************** * * Initializes the spill code. Should be called once per function compiled. */ // inline void RegSet::rsSpillInit() { /* Clear out the spill and multi-use tables */ memset(rsSpillDesc, 0, sizeof(rsSpillDesc)); rsNeededSpillReg = false; /* We don't have any descriptors allocated */ rsSpillFree = nullptr; } /***************************************************************************** * * Shuts down the spill code. Should be called once per function compiled. */ // inline void RegSet::rsSpillDone() { rsSpillChk(); } /***************************************************************************** * * Begin tracking spills - should be called each time before a pass is made * over a function body. */ // inline void RegSet::rsSpillBeg() { rsSpillChk(); } /***************************************************************************** * * Finish tracking spills - should be called each time after a pass is made * over a function body. 
*/ // inline void RegSet::rsSpillEnd() { rsSpillChk(); } //**************************************************************************** // Create a new SpillDsc or get one off the free list // // inline RegSet::SpillDsc* RegSet::SpillDsc::alloc(Compiler* pComp, RegSet* regSet, var_types type) { RegSet::SpillDsc* spill; RegSet::SpillDsc** pSpill; pSpill = &(regSet->rsSpillFree); // Allocate spill structure if (*pSpill) { spill = *pSpill; *pSpill = spill->spillNext; } else { spill = pComp->getAllocator().allocate<SpillDsc>(1); } return spill; } //**************************************************************************** // Free a SpillDsc and return it to the rsSpillFree list // // inline void RegSet::SpillDsc::freeDsc(RegSet* regSet, RegSet::SpillDsc* spillDsc) { spillDsc->spillNext = regSet->rsSpillFree; regSet->rsSpillFree = spillDsc; } /***************************************************************************** * * Make sure no spills are currently active - used for debugging of the code * generator. */ #ifdef DEBUG // inline void RegSet::rsSpillChk() { // All grabbed temps should have been released assert(tmpGetCount == 0); for (regNumber reg = REG_FIRST; reg < REG_COUNT; reg = REG_NEXT(reg)) { assert(rsSpillDesc[reg] == nullptr); } } #else // inline void RegSet::rsSpillChk() { } #endif
-1
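
The regset.cpp excerpt that closes the record above is built around a size-bucketed free list of spill temps: LSRA preallocates the maximum number of temps it will ever need per size, and code generation then borrows and returns them, treating a dry bucket as a fatal planning error. A minimal C# sketch of that discipline, with invented names and with the JIT's per-type matching inside a slot simplified away, might look like this:

using System;
using System.Collections.Generic;

// Illustrative sketch only: a size-bucketed pool of preallocated spill temps.
// None of these names correspond to actual JIT types.
sealed class SpillTempPool
{
    // One free list per distinct temp size (e.g. 4, 8, 16 bytes).
    private readonly Dictionary<int, Stack<int>> _free = new Dictionary<int, Stack<int>>();
    private int _nextTempNum = -1; // temp numbers are negative, as in the JIT

    // Called before code generation with the maximum concurrent count per size.
    public void PreAllocate(int size, int count)
    {
        if (!_free.TryGetValue(size, out Stack<int> bucket))
        {
            bucket = new Stack<int>();
            _free[size] = bucket;
        }
        for (int i = 0; i < count; i++)
        {
            bucket.Push(_nextTempNum--);
        }
    }

    // Code generation may only consume what was preallocated; running dry
    // here would indicate a planning bug, so it is treated as fatal.
    public int Get(int size)
    {
        if (!_free.TryGetValue(size, out Stack<int> bucket) || bucket.Count == 0)
        {
            throw new InvalidOperationException($"no preallocated temp of size {size}");
        }
        return bucket.Pop();
    }

    // A released temp goes back on the free list for the same size bucket.
    public void Release(int size, int tempNum)
    {
        _free[size].Push(tempNum);
    }
}

Keying buckets by size rather than by type mirrors the comment in tmpPreAllocateTemps that slots are shared by types of identical size; the real code additionally walks a slot's list so it can reuse a temp of the exact requested type when one is available.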
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/libraries/Microsoft.Extensions.FileSystemGlobbing/src/Abstractions/DirectoryInfoWrapper.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.IO; namespace Microsoft.Extensions.FileSystemGlobbing.Abstractions { /// <summary> /// Wraps an instance of <see cref="System.IO.DirectoryInfo" /> and provides implementation of /// <see cref="DirectoryInfoBase" />. /// </summary> public class DirectoryInfoWrapper : DirectoryInfoBase { private readonly DirectoryInfo _directoryInfo; private readonly bool _isParentPath; /// <summary> /// Initializes an instance of <see cref="DirectoryInfoWrapper" />. /// </summary> /// <param name="directoryInfo">The <see cref="DirectoryInfo" />.</param> public DirectoryInfoWrapper(DirectoryInfo directoryInfo) : this(directoryInfo, isParentPath: false) { } private DirectoryInfoWrapper(DirectoryInfo directoryInfo, bool isParentPath) { _directoryInfo = directoryInfo; _isParentPath = isParentPath; } /// <inheritdoc /> public override IEnumerable<FileSystemInfoBase> EnumerateFileSystemInfos() { if (_directoryInfo.Exists) { IEnumerable<FileSystemInfo> fileSystemInfos; try { fileSystemInfos = _directoryInfo.EnumerateFileSystemInfos("*", SearchOption.TopDirectoryOnly); } catch (DirectoryNotFoundException) { yield break; } foreach (FileSystemInfo fileSystemInfo in fileSystemInfos) { if (fileSystemInfo is DirectoryInfo directoryInfo) { yield return new DirectoryInfoWrapper(directoryInfo); } else { yield return new FileInfoWrapper((FileInfo)fileSystemInfo); } } } } /// <summary> /// Returns an instance of <see cref="DirectoryInfoBase" /> that represents a subdirectory. /// </summary> /// <remarks> /// If <paramref name="name" /> equals '..', this returns the parent directory. /// </remarks> /// <param name="name">The directory name</param> /// <returns>The directory</returns> public override DirectoryInfoBase? GetDirectory(string name) { bool isParentPath = string.Equals(name, "..", StringComparison.Ordinal); if (isParentPath) { return new DirectoryInfoWrapper( new DirectoryInfo(Path.Combine(_directoryInfo.FullName, name)), isParentPath); } else { DirectoryInfo[] dirs = _directoryInfo.GetDirectories(name); if (dirs.Length == 1) { return new DirectoryInfoWrapper(dirs[0], isParentPath); } else if (dirs.Length == 0) { return null; } else { // This shouldn't happen. The parameter name isn't supposed to contain wild card. throw new InvalidOperationException( $"More than one sub directories are found under {_directoryInfo.FullName} with name {name}."); } } } /// <inheritdoc /> public override FileInfoBase GetFile(string name) => new FileInfoWrapper(new FileInfo(Path.Combine(_directoryInfo.FullName, name))); /// <inheritdoc /> public override string Name => _isParentPath ? ".." : _directoryInfo.Name; /// <summary> /// Returns the full path to the directory. /// </summary> /// <remarks> /// Equals the value of <seealso cref="System.IO.FileSystemInfo.FullName" />. /// </remarks> public override string FullName => _directoryInfo.FullName; /// <summary> /// Returns the parent directory. /// </summary> /// <remarks> /// Equals the value of <seealso cref="System.IO.DirectoryInfo.Parent" />. /// </remarks> public override DirectoryInfoBase? ParentDirectory => new DirectoryInfoWrapper(_directoryInfo.Parent!); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using System.IO; namespace Microsoft.Extensions.FileSystemGlobbing.Abstractions { /// <summary> /// Wraps an instance of <see cref="System.IO.DirectoryInfo" /> and provides implementation of /// <see cref="DirectoryInfoBase" />. /// </summary> public class DirectoryInfoWrapper : DirectoryInfoBase { private readonly DirectoryInfo _directoryInfo; private readonly bool _isParentPath; /// <summary> /// Initializes an instance of <see cref="DirectoryInfoWrapper" />. /// </summary> /// <param name="directoryInfo">The <see cref="DirectoryInfo" />.</param> public DirectoryInfoWrapper(DirectoryInfo directoryInfo) : this(directoryInfo, isParentPath: false) { } private DirectoryInfoWrapper(DirectoryInfo directoryInfo, bool isParentPath) { _directoryInfo = directoryInfo; _isParentPath = isParentPath; } /// <inheritdoc /> public override IEnumerable<FileSystemInfoBase> EnumerateFileSystemInfos() { if (_directoryInfo.Exists) { IEnumerable<FileSystemInfo> fileSystemInfos; try { fileSystemInfos = _directoryInfo.EnumerateFileSystemInfos("*", SearchOption.TopDirectoryOnly); } catch (DirectoryNotFoundException) { yield break; } foreach (FileSystemInfo fileSystemInfo in fileSystemInfos) { if (fileSystemInfo is DirectoryInfo directoryInfo) { yield return new DirectoryInfoWrapper(directoryInfo); } else { yield return new FileInfoWrapper((FileInfo)fileSystemInfo); } } } } /// <summary> /// Returns an instance of <see cref="DirectoryInfoBase" /> that represents a subdirectory. /// </summary> /// <remarks> /// If <paramref name="name" /> equals '..', this returns the parent directory. /// </remarks> /// <param name="name">The directory name</param> /// <returns>The directory</returns> public override DirectoryInfoBase? GetDirectory(string name) { bool isParentPath = string.Equals(name, "..", StringComparison.Ordinal); if (isParentPath) { return new DirectoryInfoWrapper( new DirectoryInfo(Path.Combine(_directoryInfo.FullName, name)), isParentPath); } else { DirectoryInfo[] dirs = _directoryInfo.GetDirectories(name); if (dirs.Length == 1) { return new DirectoryInfoWrapper(dirs[0], isParentPath); } else if (dirs.Length == 0) { return null; } else { // This shouldn't happen. The parameter name isn't supposed to contain wild card. throw new InvalidOperationException( $"More than one sub directories are found under {_directoryInfo.FullName} with name {name}."); } } } /// <inheritdoc /> public override FileInfoBase GetFile(string name) => new FileInfoWrapper(new FileInfo(Path.Combine(_directoryInfo.FullName, name))); /// <inheritdoc /> public override string Name => _isParentPath ? ".." : _directoryInfo.Name; /// <summary> /// Returns the full path to the directory. /// </summary> /// <remarks> /// Equals the value of <seealso cref="System.IO.FileSystemInfo.FullName" />. /// </remarks> public override string FullName => _directoryInfo.FullName; /// <summary> /// Returns the parent directory. /// </summary> /// <remarks> /// Equals the value of <seealso cref="System.IO.DirectoryInfo.Parent" />. /// </remarks> public override DirectoryInfoBase? ParentDirectory => new DirectoryInfoWrapper(_directoryInfo.Parent!); } }
-1
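
The DirectoryInfoWrapper file carried by this record is the adapter that lets the file-globbing Matcher walk a real directory tree. A small, illustrative consumer (the root path and glob patterns here are arbitrary examples) would look roughly like this:

using System;
using System.IO;
using Microsoft.Extensions.FileSystemGlobbing;
using Microsoft.Extensions.FileSystemGlobbing.Abstractions;

class GlobDemo
{
    static void Main()
    {
        // Wrap the starting directory so Matcher can traverse it through
        // the DirectoryInfoBase abstraction implemented above.
        var root = new DirectoryInfoWrapper(new DirectoryInfo("."));

        var matcher = new Matcher();
        matcher.AddInclude("**/*.cs");
        matcher.AddExclude("**/obj/**");

        PatternMatchingResult result = matcher.Execute(root);
        foreach (FilePatternMatch file in result.Files)
        {
            // Paths are reported relative to the wrapped root.
            Console.WriteLine(file.Path);
        }
    }
}

Note that GetDirectory special-cases "..", and that a wrapper created that way reports ".." as its Name rather than the parent directory's real name.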
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/tests/JIT/Methodical/int64/misc/binop.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { } .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly 'binop'// as "binop" { } .assembly extern xunit.core {} // MVID: {E24B38CB-1AB3-45BA-AA54-5C10DD72CDBA} .namespace JitTest { .class private auto ansi Test extends ['mscorlib']System.Object { .method private hidebysig static int32 Main() il managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint // Code size 196 (0xc4) .maxstack 2 .locals (int64 V_0, int64 V_1, unsigned int64 V_2, unsigned int64 V_3, int32 V_4) .try { IL_0000: ldc.i8 0x7000123480001234 IL_0009: stloc.0 IL_000a: ldc.i8 0x7123400081234000 IL_0013: stloc.1 IL_0014: ldloc.0 IL_0015: ldloc.1 IL_0016: and IL_0017: ldc.i8 0x7000000080000000 IL_0020: beq.s IL_0027 IL_0022: leave IL_00b2 IL_0027: ldc.i8 0x7000123480001234 IL_0030: stloc.0 IL_0031: ldc.i8 0x7123400081234000 IL_003a: stloc.1 IL_003b: ldloc.0 IL_003c: ldloc.1 IL_003d: or IL_003e: ldc.i8 0x7123523481235234 IL_0047: beq.s IL_004b IL_0049: leave.s IL_00b2 IL_004b: ldc.i8 0x8000123480001234 IL_0054: stloc.2 IL_0055: ldc.i8 0x8123400081234000 IL_005e: stloc.3 IL_005f: ldloc.2 IL_0060: ldloc.3 IL_0061: and IL_0062: not IL_0063: ldc.i8 0x7fffffff7fffffff IL_006c: beq.s IL_0070 IL_006e: leave.s IL_00b2 IL_0070: ldc.i8 0x8000123480001234 IL_0079: stloc.2 IL_007a: ldc.i8 0x8123400081234000 IL_0083: stloc.3 IL_0084: ldloc.2 IL_0085: ldloc.3 IL_0086: or IL_0087: ldc.i8 0x8123523481235234 IL_0090: beq.s IL_0094 IL_0092: leave.s IL_00b2 IL_0094: leave.s IL_00a3 } // end .try catch ['mscorlib']System.Exception { IL_0096: pop IL_0097: ldstr "Exception handled!" IL_009c: call void [System.Console]System.Console::WriteLine(class System.String) IL_00a1: leave.s IL_00b2 } // end handler IL_00a3: ldstr "Passed" IL_00a8: call void [System.Console]System.Console::WriteLine(class System.String) IL_00ad: ldc.i4 0x64 IL_00ae: stloc.s V_4 IL_00b0: br.s IL_00c1 IL_00b2: ldstr "Failed" IL_00b7: call void [System.Console]System.Console::WriteLine(class System.String) IL_00bc: ldc.i4.1 IL_00bd: stloc.s V_4 IL_00c1: ldloc.s V_4 IL_00c3: ret } // end of method 'Test::Main' .method public hidebysig specialname rtspecialname instance void .ctor() il managed { // Code size 7 (0x7) .maxstack 8 IL_0000: ldarg.0 IL_0001: call instance void ['mscorlib']System.Object::.ctor() IL_0006: ret } // end of method 'Test::.ctor' } // end of class 'Test' } // end of namespace 'JitTest' //*********** DISASSEMBLY COMPLETE ***********************
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. .assembly extern mscorlib { } .assembly extern System.Console { .publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A ) .ver 4:0:0:0 } .assembly 'binop'// as "binop" { } .assembly extern xunit.core {} // MVID: {E24B38CB-1AB3-45BA-AA54-5C10DD72CDBA} .namespace JitTest { .class private auto ansi Test extends ['mscorlib']System.Object { .method private hidebysig static int32 Main() il managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint // Code size 196 (0xc4) .maxstack 2 .locals (int64 V_0, int64 V_1, unsigned int64 V_2, unsigned int64 V_3, int32 V_4) .try { IL_0000: ldc.i8 0x7000123480001234 IL_0009: stloc.0 IL_000a: ldc.i8 0x7123400081234000 IL_0013: stloc.1 IL_0014: ldloc.0 IL_0015: ldloc.1 IL_0016: and IL_0017: ldc.i8 0x7000000080000000 IL_0020: beq.s IL_0027 IL_0022: leave IL_00b2 IL_0027: ldc.i8 0x7000123480001234 IL_0030: stloc.0 IL_0031: ldc.i8 0x7123400081234000 IL_003a: stloc.1 IL_003b: ldloc.0 IL_003c: ldloc.1 IL_003d: or IL_003e: ldc.i8 0x7123523481235234 IL_0047: beq.s IL_004b IL_0049: leave.s IL_00b2 IL_004b: ldc.i8 0x8000123480001234 IL_0054: stloc.2 IL_0055: ldc.i8 0x8123400081234000 IL_005e: stloc.3 IL_005f: ldloc.2 IL_0060: ldloc.3 IL_0061: and IL_0062: not IL_0063: ldc.i8 0x7fffffff7fffffff IL_006c: beq.s IL_0070 IL_006e: leave.s IL_00b2 IL_0070: ldc.i8 0x8000123480001234 IL_0079: stloc.2 IL_007a: ldc.i8 0x8123400081234000 IL_0083: stloc.3 IL_0084: ldloc.2 IL_0085: ldloc.3 IL_0086: or IL_0087: ldc.i8 0x8123523481235234 IL_0090: beq.s IL_0094 IL_0092: leave.s IL_00b2 IL_0094: leave.s IL_00a3 } // end .try catch ['mscorlib']System.Exception { IL_0096: pop IL_0097: ldstr "Exception handled!" IL_009c: call void [System.Console]System.Console::WriteLine(class System.String) IL_00a1: leave.s IL_00b2 } // end handler IL_00a3: ldstr "Passed" IL_00a8: call void [System.Console]System.Console::WriteLine(class System.String) IL_00ad: ldc.i4 0x64 IL_00ae: stloc.s V_4 IL_00b0: br.s IL_00c1 IL_00b2: ldstr "Failed" IL_00b7: call void [System.Console]System.Console::WriteLine(class System.String) IL_00bc: ldc.i4.1 IL_00bd: stloc.s V_4 IL_00c1: ldloc.s V_4 IL_00c3: ret } // end of method 'Test::Main' .method public hidebysig specialname rtspecialname instance void .ctor() il managed { // Code size 7 (0x7) .maxstack 8 IL_0000: ldarg.0 IL_0001: call instance void ['mscorlib']System.Object::.ctor() IL_0006: ret } // end of method 'Test::.ctor' } // end of class 'Test' } // end of namespace 'JitTest' //*********** DISASSEMBLY COMPLETE ***********************
-1
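
The binop.il test above exercises 64-bit bitwise and/or on signed and unsigned operands against hard-coded expected constants, printing Passed and returning 100 on success. Purely for readability, an equivalent C# rendering of the same checks (illustrative only, not part of the test tree) is:

using System;

class BinopCheck
{
    static int Main()
    {
        long a = 0x7000123480001234, b = 0x7123400081234000;
        ulong c = 0x8000123480001234UL, d = 0x8123400081234000UL;

        // Same expected values as the ldc.i8 constants in the IL above;
        // the IL's catch handler is omitted here for brevity.
        bool passed =
            (a & b) == 0x7000000080000000 &&
            (a | b) == 0x7123523481235234 &&
            ~(c & d) == 0x7FFFFFFF7FFFFFFFUL &&
            (c | d) == 0x8123523481235234UL;

        Console.WriteLine(passed ? "Passed" : "Failed");
        return passed ? 100 : 1;
    }
}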
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./eng/pipelines/coreclr/crossgen2-outerloop.yml
trigger: none schedules: - cron: "0 5 * * *" displayName: Mon through Sun at 9:00 PM (UTC-8:00) branches: include: - main always: true jobs: - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/build-job.yml buildConfig: checked platforms: - Linux_arm - Linux_x64 - Linux_arm64 - OSX_arm64 - OSX_x64 - windows_x86 - windows_x64 - windows_arm64 - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 jobParameters: testGroup: outerloop - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/build-job.yml buildConfig: Release platforms: - Linux_arm - Linux_arm64 - Linux_x64 - OSX_arm64 - windows_x86 - windows_x64 jobParameters: testGroup: outerloop - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/libraries/build-job.yml buildConfig: Release platforms: - Linux_arm - Linux_arm64 - Linux_x64 - OSX_arm64 - OSX_x64 - windows_x86 - windows_x64 - windows_arm64 - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 jobParameters: isOfficialBuild: false liveRuntimeBuildConfig: Release - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/build-test-job.yml buildConfig: checked platforms: - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 jobParameters: testGroup: outerloop # Test most platforms in composite mode as the expected mainline shipping mode - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/run-test-job.yml helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml buildConfig: checked platforms: - Linux_x64 - Linux_arm64 - OSX_arm64 - OSX_x64 - windows_x64 - windows_arm64 jobParameters: testGroup: outerloop readyToRun: true compositeBuildMode: true displayNameArgs: R2R_Composite liveLibrariesBuildConfig: Release # Outerloop testing in non-composite mode - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/run-test-job.yml helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml buildConfig: checked platforms: - Linux_arm - Linux_arm64 - Linux_x64 - OSX_x64 - OSX_arm64 - windows_arm64 - windows_x64 - windows_x86 jobParameters: testGroup: outerloop readyToRun: true displayNameArgs: R2R liveLibrariesBuildConfig: Release # Build Crossgen2 baselines # These are the various crossgen2 targets that are supported, and cover all major # significantly different code generators - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-build-job.yml buildConfig: Release platforms: - Linux_arm - Linux_arm64 - Linux_x64 - OSX_arm64 - windows_x86 - windows_x64 jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release # test crossgen target Windows X86 # This job verifies that 32-bit and 64 bit crossgen2 produces the same binaries, # and that cross-os targetting works - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-job.yml buildConfig: Release platforms: - Linux_x64 - windows_x86 helixQueueGroup: pr helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release targetos: windows targetarch: x86 # test target Linux X64 # verify that cross OS targetting works - template: 
/eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-job.yml buildConfig: Release platforms: - windows_x64 helixQueueGroup: pr helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release targetos: Linux targetarch: x64 # test target Windows X64 # verify that cross OS targetting works - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-job.yml buildConfig: Release platforms: - Linux_x64 helixQueueGroup: pr helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release targetos: windows targetarch: x64 # test target Linux arm # verify that cross architecture targetting works - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-job.yml buildConfig: Release platforms: - Linux_arm helixQueueGroup: pr helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release targetos: Linux targetarch: arm # test target Linux arm64 # verify that cross architecture targetting works - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-job.yml buildConfig: Release platforms: - Linux_arm64 helixQueueGroup: pr helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release targetos: Linux targetarch: arm64 # test target osx-arm64 # verify that cross architecture targetting works - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-job.yml buildConfig: Release platforms: - OSX_arm64 helixQueueGroup: pr helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release targetos: OSX targetarch: arm64
trigger: none schedules: - cron: "0 5 * * *" displayName: Mon through Sun at 9:00 PM (UTC-8:00) branches: include: - main always: true jobs: - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/build-job.yml buildConfig: checked platforms: - Linux_arm - Linux_x64 - Linux_arm64 - OSX_arm64 - OSX_x64 - windows_x86 - windows_x64 - windows_arm64 - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 jobParameters: testGroup: outerloop - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/build-job.yml buildConfig: Release platforms: - Linux_arm - Linux_arm64 - Linux_x64 - OSX_arm64 - windows_x86 - windows_x64 jobParameters: testGroup: outerloop - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/libraries/build-job.yml buildConfig: Release platforms: - Linux_arm - Linux_arm64 - Linux_x64 - OSX_arm64 - OSX_x64 - windows_x86 - windows_x64 - windows_arm64 - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 jobParameters: isOfficialBuild: false liveRuntimeBuildConfig: Release - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/build-test-job.yml buildConfig: checked platforms: - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 jobParameters: testGroup: outerloop # Test most platforms in composite mode as the expected mainline shipping mode - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/run-test-job.yml helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml buildConfig: checked platforms: - Linux_x64 - Linux_arm64 - OSX_arm64 - OSX_x64 - windows_x64 - windows_arm64 jobParameters: testGroup: outerloop readyToRun: true compositeBuildMode: true displayNameArgs: R2R_Composite liveLibrariesBuildConfig: Release # Outerloop testing in non-composite mode - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/run-test-job.yml helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml buildConfig: checked platforms: - Linux_arm - Linux_arm64 - Linux_x64 - OSX_x64 - OSX_arm64 - windows_arm64 - windows_x64 - windows_x86 jobParameters: testGroup: outerloop readyToRun: true displayNameArgs: R2R liveLibrariesBuildConfig: Release # Build Crossgen2 baselines # These are the various crossgen2 targets that are supported, and cover all major # significantly different code generators - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-build-job.yml buildConfig: Release platforms: - Linux_arm - Linux_arm64 - Linux_x64 - OSX_arm64 - windows_x86 - windows_x64 jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release # test crossgen target Windows X86 # This job verifies that 32-bit and 64 bit crossgen2 produces the same binaries, # and that cross-os targetting works - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-job.yml buildConfig: Release platforms: - Linux_x64 - windows_x86 helixQueueGroup: pr helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release targetos: windows targetarch: x86 # test target Linux X64 # verify that cross OS targetting works - template: 
/eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-job.yml buildConfig: Release platforms: - windows_x64 helixQueueGroup: pr helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release targetos: Linux targetarch: x64 # test target Windows X64 # verify that cross OS targetting works - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-job.yml buildConfig: Release platforms: - Linux_x64 helixQueueGroup: pr helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release targetos: windows targetarch: x64 # test target Linux arm # verify that cross architecture targetting works - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-job.yml buildConfig: Release platforms: - Linux_arm helixQueueGroup: pr helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release targetos: Linux targetarch: arm # test target Linux arm64 # verify that cross architecture targetting works - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-job.yml buildConfig: Release platforms: - Linux_arm64 helixQueueGroup: pr helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release targetos: Linux targetarch: arm64 # test target osx-arm64 # verify that cross architecture targetting works - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossgen2-comparison-job.yml buildConfig: Release platforms: - OSX_arm64 helixQueueGroup: pr helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release targetos: OSX targetarch: arm64
-1
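
The PR description attached to these records outlines an OSR patchpoint placement policy: place patchpoints at backedge sources, at backedge targets, or choose adaptively based on the number of backedges, falling back to targets when sources cannot be used. The real logic lives in the JIT's C++ code and is not shown in these records; as a loose, purely illustrative sketch of the decision shape, with an invented threshold and invented names, one could imagine something like:

// Illustrative only: this does not correspond to real JIT code or config knobs.
enum PatchpointStrategy { BackedgeTargets, BackedgeSources, Adaptive }

static class PatchpointPolicy
{
    // Returns true to place OSR patchpoints at backedge sources,
    // false to place them at backedge targets.
    public static bool PlaceAtSources(PatchpointStrategy strategy, int backedgeCount, bool sourcesPlaceable)
    {
        if (!sourcesPlaceable)
        {
            return false; // sources can't be used for some reason: fall back to targets
        }

        switch (strategy)
        {
            case PatchpointStrategy.BackedgeSources:
                return true;
            case PatchpointStrategy.BackedgeTargets:
                return false;
            case PatchpointStrategy.Adaptive:
                return backedgeCount <= 2; // invented threshold: few backedges favor sources
            default:
                return false;
        }
    }
}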
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/libraries/System.Private.Xml/src/System/Xml/Serialization/ReflectionXmlSerializationReader.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Concurrent; using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Linq.Expressions; using System.Reflection; using System.Runtime.CompilerServices; using System.Xml.Extensions; using System.Xml.Schema; // UnconditionalSuppressMessage that specify a Target need to be at the assembly or module level for now. Also, // they won't consider Target unless you also specify Scope to be either "member" or "type" [assembly: UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:RequiresUnreferencedCode", Target = "M:System.Xml.Serialization.ReflectionXmlSerializationReader.#cctor", Scope = "member", Justification = "The reason why this warns is because the two static properties call GetTypeDesc() which internally will call " + "ImportTypeDesc() when the passed in type is not considered a primitive type. That said, for both properties here we are passing in string " + "and XmlQualifiedName which are considered primitive, so they are trim safe.")] namespace System.Xml.Serialization { internal delegate void UnknownNodeAction(object? o); internal sealed class ReflectionXmlSerializationReader : XmlSerializationReader { private readonly XmlMapping _mapping; // Suppressed for the linker by the assembly-level UnconditionalSuppressMessageAttribute // https://github.com/dotnet/linker/issues/2648 #pragma warning disable IL2026 internal static TypeDesc StringTypeDesc { get; set; } = (new TypeScope()).GetTypeDesc(typeof(string)); internal static TypeDesc QnameTypeDesc { get; set; } = (new TypeScope()).GetTypeDesc(typeof(XmlQualifiedName)); #pragma warning restore IL2026 public ReflectionXmlSerializationReader(XmlMapping mapping, XmlReader xmlReader, XmlDeserializationEvents events, string? encodingStyle) { Init(xmlReader, events, encodingStyle, tempAssembly: null); _mapping = mapping; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] protected override void InitCallbacks() { TypeScope scope = _mapping.Scope!; foreach (TypeMapping mapping in scope.TypeMappings) { if (mapping.IsSoap && (mapping is StructMapping || mapping is EnumMapping || mapping is ArrayMapping || mapping is NullableMapping) && !mapping.TypeDesc!.IsRoot) { AddReadCallback( mapping.TypeName!, mapping.Namespace!, mapping.TypeDesc.Type!, CreateXmlSerializationReadCallback(mapping)); } } } protected override void InitIDs() { } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] public object? 
ReadObject() { XmlMapping xmlMapping = _mapping; if (!xmlMapping.IsReadable) return null; if (!xmlMapping.GenerateSerializer) throw new ArgumentException(SR.Format(SR.XmlInternalError, "xmlMapping")); if (xmlMapping is XmlTypeMapping xmlTypeMapping) { return GenerateTypeElement(xmlTypeMapping); } else if (xmlMapping is XmlMembersMapping xmlMembersMapping) { return GenerateMembersElement(xmlMembersMapping); } else { throw new ArgumentException(SR.Format(SR.XmlInternalError, "xmlMapping")); } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object GenerateMembersElement(XmlMembersMapping xmlMembersMapping) { if (xmlMembersMapping.Accessor.IsSoap) { return GenerateEncodedMembersElement(xmlMembersMapping); } else { return GenerateLiteralMembersElement(xmlMembersMapping); } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object GenerateLiteralMembersElement(XmlMembersMapping xmlMembersMapping) { ElementAccessor element = xmlMembersMapping.Accessor; MemberMapping[] mappings = ((MembersMapping)element.Mapping!).Members!; bool hasWrapperElement = ((MembersMapping)element.Mapping).HasWrapperElement; Reader.MoveToContent(); object[] p = new object[mappings.Length]; InitializeValueTypes(p, mappings); if (hasWrapperElement) { string elementName = element.Name; string elementNs = element.Form == XmlSchemaForm.Qualified ? element.Namespace! : string.Empty; Reader.MoveToContent(); while (Reader.NodeType != XmlNodeType.EndElement && Reader.NodeType != XmlNodeType.None) { if (Reader.IsStartElement(element.Name, elementNs)) { if (!GenerateLiteralMembersElementInternal(mappings, hasWrapperElement, p)) { continue; } ReadEndElement(); } else { UnknownNode(null, $"{elementNs}:{elementName}"); } Reader.MoveToContent(); } } else { GenerateLiteralMembersElementInternal(mappings, hasWrapperElement, p); } return p; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private bool GenerateLiteralMembersElementInternal(MemberMapping[] mappings, bool hasWrapperElement, object?[] p) { Member? anyText = null; Member? anyElement = null; Member? 
anyAttribute = null; var membersList = new List<Member>(); var textOrArrayMembersList = new List<Member>(); var attributeMembersList = new List<Member>(); for (int i = 0; i < mappings.Length; i++) { int index = i; MemberMapping mapping = mappings[index]; Action<object?> source = (o) => p[index] = o; Member member = new Member(mapping); Member anyMember = new Member(mapping); if (mapping.Xmlns != null) { var xmlns = new XmlSerializerNamespaces(); p[index] = xmlns; member.XmlnsSource = (ns, name) => xmlns.Add(ns, name); } member.Source = source; anyMember.Source = source; if (mapping.CheckSpecified == SpecifiedAccessor.ReadWrite) { string nameSpecified = $"{mapping.Name}Specified"; for (int j = 0; j < mappings.Length; j++) { if (mappings[j].Name == nameSpecified) { int indexJ = j; member.CheckSpecifiedSource = (o) => p[indexJ] = o; } } } bool foundAnyElement = false; if (mapping.Text != null) { anyText = anyMember; } if (mapping.Attribute != null && mapping.Attribute.Any) { anyMember.Collection = new CollectionMember(); anyMember.ArraySource = anyMember.Source; anyMember.Source = (item) => { anyMember.Collection.Add(item); }; anyAttribute = anyMember; } if (mapping.Attribute != null || mapping.Xmlns != null) { attributeMembersList.Add(member); } else if (mapping.Text != null) { textOrArrayMembersList.Add(member); } if (!mapping.IsSequence) { for (int j = 0; j < mapping.Elements!.Length; j++) { if (mapping.Elements[j].Any && mapping.Elements[j].Name.Length == 0) { anyElement = anyMember; if (mapping.Attribute == null && mapping.Text == null) { anyMember.Collection = new CollectionMember(); anyMember.ArraySource = (item) => { anyMember.Collection.Add(item); }; textOrArrayMembersList.Add(anyMember); } foundAnyElement = true; break; } } } if (mapping.Attribute != null || mapping.Text != null || foundAnyElement) { membersList.Add(anyMember); } else if (mapping.TypeDesc!.IsArrayLike && !(mapping.Elements!.Length == 1 && mapping.Elements[0].Mapping is ArrayMapping)) { anyMember.Collection = new CollectionMember(); anyMember.ArraySource = (item) => { anyMember.Collection.Add(item); }; membersList.Add(anyMember); textOrArrayMembersList.Add(anyMember); } else { membersList.Add(member); } } Member[] members = membersList.ToArray(); Member[] textOrArrayMembers = textOrArrayMembersList.ToArray(); if (members.Length > 0 && members[0].Mapping.IsReturnValue) IsReturnValue = true; if (attributeMembersList.Count > 0) { Member[] attributeMembers = attributeMembersList.ToArray(); object? tempObject = null; WriteAttributes(attributeMembers, anyAttribute, UnknownNode, ref tempObject); Reader.MoveToElement(); } if (hasWrapperElement) { if (Reader.IsEmptyElement) { Reader.Skip(); Reader.MoveToContent(); return false; } Reader.ReadStartElement(); } Reader.MoveToContent(); while (Reader.NodeType != XmlNodeType.EndElement && Reader.NodeType != XmlNodeType.None) { WriteMemberElements(members, UnknownNode, UnknownNode, anyElement, anyText, null); Reader.MoveToContent(); } foreach (Member member in textOrArrayMembers) { object? value = null; SetCollectionObjectWithCollectionMember(ref value, member.Collection!, member.Mapping.TypeDesc!.Type!); member.Source!(value); } if (anyAttribute != null) { object? 
value = null; SetCollectionObjectWithCollectionMember(ref value, anyAttribute.Collection!, anyAttribute.Mapping.TypeDesc!.Type!); anyAttribute.ArraySource!(value); } return true; } private void InitializeValueTypes(object?[] p, MemberMapping[] mappings) { for (int i = 0; i < mappings.Length; i++) { if (!mappings[i].TypeDesc!.IsValueType) continue; if (mappings[i].TypeDesc!.IsOptionalValue && mappings[i].TypeDesc!.BaseTypeDesc!.UseReflection) { p[i] = null; } else { p[i] = ReflectionCreateObject(mappings[i].TypeDesc!.Type!); } } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object GenerateEncodedMembersElement(XmlMembersMapping xmlMembersMapping) { ElementAccessor element = xmlMembersMapping.Accessor; var membersMapping = (MembersMapping)element.Mapping!; MemberMapping[] mappings = membersMapping.Members!; bool hasWrapperElement = membersMapping.HasWrapperElement; bool writeAccessors = membersMapping.WriteAccessors; Reader.MoveToContent(); object?[] p = new object[mappings.Length]; InitializeValueTypes(p, mappings); bool isEmptyWrapper = true; if (hasWrapperElement) { Reader.MoveToContent(); while (Reader.NodeType == XmlNodeType.Element) { string? root = Reader.GetAttribute("root", Soap.Encoding); if (root == null || XmlConvert.ToBoolean(root)) break; ReadReferencedElement(); Reader.MoveToContent(); } if (membersMapping.ValidateRpcWrapperElement) { string name = element.Name; string? ns = element.Form == XmlSchemaForm.Qualified ? element.Namespace : string.Empty; if (!XmlNodeEqual(Reader, name, ns)) { throw CreateUnknownNodeException(); } } isEmptyWrapper = Reader.IsEmptyElement; Reader.ReadStartElement(); } Member[] members = new Member[mappings.Length]; for (int i = 0; i < mappings.Length; i++) { int index = i; MemberMapping mapping = mappings[index]; var member = new Member(mapping); member.Source = (value) => p[index] = value; members[index] = member; if (mapping.CheckSpecified == SpecifiedAccessor.ReadWrite) { string nameSpecified = $"{mapping.Name}Specified"; for (int j = 0; j < mappings.Length; j++) { if (mappings[j].Name == nameSpecified) { int indexOfSpecifiedMember = j; member.CheckSpecifiedSource = (value) => p[indexOfSpecifiedMember] = value; break; } } } } Fixup? fixup = WriteMemberFixupBegin(members, p); if (members.Length > 0 && members[0].Mapping.IsReturnValue) { IsReturnValue = true; } List<CheckTypeSource>? checkTypeHrefSource = null; if (!hasWrapperElement && !writeAccessors) { checkTypeHrefSource = new List<CheckTypeSource>(); } Reader.MoveToContent(); while (Reader.NodeType != XmlNodeType.EndElement && Reader.NodeType != XmlNodeType.None) { UnknownNodeAction unrecognizedElementSource; if (checkTypeHrefSource == null) { unrecognizedElementSource = (_) => UnknownNode(p); } else { unrecognizedElementSource = Wrapper; [RequiresUnreferencedCode("calls ReadReferencedElement")] void Wrapper(object? _) { if (Reader.GetAttribute("id", null) != null) { ReadReferencedElement(); } else { UnknownNode(p); } } } WriteMemberElements(members, unrecognizedElementSource, (_) => UnknownNode(p), null, null, fixup: fixup, checkTypeHrefsSource: checkTypeHrefSource); Reader.MoveToContent(); } if (!isEmptyWrapper) { ReadEndElement(); } if (checkTypeHrefSource != null) { foreach (CheckTypeSource currentySource in checkTypeHrefSource) { bool isReferenced = true; bool isObject = currentySource.IsObject; object? refObj = isObject ? 
currentySource.RefObject : GetTarget((string)currentySource.RefObject!); if (refObj == null) { continue; } var checkTypeSource = new CheckTypeSource() { RefObject = refObj, Type = refObj.GetType(), Id = null }; WriteMemberElementsIf(members, null, (_) => isReferenced = false, fixup, checkTypeSource); if (isObject && isReferenced) { Referenced(refObj); } } } ReadReferencedElements(); return p; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? GenerateTypeElement(XmlTypeMapping xmlTypeMapping) { ElementAccessor element = xmlTypeMapping.Accessor; TypeMapping mapping = element.Mapping!; Reader.MoveToContent(); var memberMapping = new MemberMapping(); memberMapping.TypeDesc = mapping.TypeDesc; memberMapping.Elements = new ElementAccessor[] { element }; object? o = null; var holder = new ObjectHolder(); var member = new Member(memberMapping); member.Source = (value) => holder.Object = value; member.GetSource = () => holder.Object; UnknownNodeAction elementElseAction = CreateUnknownNodeException; UnknownNodeAction elseAction = UnknownNode; WriteMemberElements(new Member[] { member }, elementElseAction, elseAction, element.Any ? member : null, null); o = holder.Object; if (element.IsSoap) { Referenced(o); ReadReferencedElements(); } return o; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteMemberElements(Member[] expectedMembers, UnknownNodeAction elementElseAction, UnknownNodeAction elseAction, Member? anyElement, Member? anyText, Fixup? fixup = null, List<CheckTypeSource>? checkTypeHrefsSource = null) { bool checkType = checkTypeHrefsSource != null; if (Reader.NodeType == XmlNodeType.Element) { if (checkType) { if (Reader.GetAttribute("root", Soap.Encoding) == "0") { elementElseAction(null); return; } WriteMemberElementsCheckType(checkTypeHrefsSource!); } else { WriteMemberElementsIf(expectedMembers, anyElement, elementElseAction, fixup: fixup); } } else if (anyText != null && anyText.Mapping != null && WriteMemberText(anyText)) { } else { ProcessUnknownNode(elseAction); } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteMemberElementsCheckType(List<CheckTypeSource> checkTypeHrefsSource) { object? RefElememnt = ReadReferencingElement(null, null, true, out string? refElemId); var source = new CheckTypeSource(); if (refElemId != null) { source.RefObject = refElemId; source.IsObject = false; checkTypeHrefsSource.Add(source); } else if (RefElememnt != null) { source.RefObject = RefElememnt; source.IsObject = true; checkTypeHrefsSource.Add(source); } } private void ProcessUnknownNode(UnknownNodeAction action) { action?.Invoke(null); } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteMembers(ref object? o, Member[] members, UnknownNodeAction elementElseAction, UnknownNodeAction elseAction, Member? anyElement, Member? anyText) { Reader.MoveToContent(); while (Reader.NodeType != XmlNodeType.EndElement && Reader.NodeType != XmlNodeType.None) { WriteMemberElements(members, elementElseAction, elseAction, anyElement, anyText); Reader.MoveToContent(); } } private void SetCollectionObjectWithCollectionMember([NotNull] ref object? 
collection, CollectionMember collectionMember, [DynamicallyAccessedMembers(TrimmerConstants.AllMethods)] Type collectionType) { if (collectionType.IsArray) { Array a; if (collection is Array currentArray && currentArray.Length == collectionMember.Count) { a = currentArray; } else { Type elementType = collectionType.GetElementType()!; a = Array.CreateInstance(elementType, collectionMember.Count); } for (int i = 0; i < collectionMember.Count; i++) { a.SetValue(collectionMember[i], i); } collection = a; } else { if (collection == null) { collection = ReflectionCreateObject(collectionType)!; } AddObjectsIntoTargetCollection(collection, collectionMember, collectionType); } } private static void AddObjectsIntoTargetCollection(object targetCollection, List<object?> sourceCollection, [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods)] Type targetCollectionType) { if (targetCollection is IList targetList) { foreach (object? item in sourceCollection) { targetList.Add(item); } } else { MethodInfo? addMethod = targetCollectionType.GetMethod("Add"); if (addMethod == null) { throw new InvalidOperationException(SR.XmlInternalError); } object?[] arguments = new object?[1]; foreach (object? item in sourceCollection) { arguments[0] = item; addMethod.Invoke(targetCollection, arguments); } } } private static readonly ContextAwareTables<Hashtable> s_setMemberValueDelegateCache = new ContextAwareTables<Hashtable>(); [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private static ReflectionXmlSerializationReaderHelper.SetMemberValueDelegate GetSetMemberValueDelegate(object o, string memberName) { Debug.Assert(o != null, "Object o should not be null"); Debug.Assert(!string.IsNullOrEmpty(memberName), "memberName must have a value"); Type type = o.GetType(); var delegateCacheForType = s_setMemberValueDelegateCache.GetOrCreateValue(type, () => new Hashtable()); var result = delegateCacheForType[memberName]; if (result == null) { lock (delegateCacheForType) { if ((result = delegateCacheForType[memberName]) == null) { MemberInfo memberInfo = ReflectionXmlSerializationHelper.GetEffectiveSetInfo(o.GetType(), memberName); Debug.Assert(memberInfo != null, "memberInfo could not be retrieved"); Type memberType; if (memberInfo is PropertyInfo propInfo) { memberType = propInfo.PropertyType; } else if (memberInfo is FieldInfo fieldInfo) { memberType = fieldInfo.FieldType; } else { throw new InvalidOperationException(SR.XmlInternalError); } MethodInfo getSetMemberValueDelegateWithTypeGenericMi = typeof(ReflectionXmlSerializationReaderHelper).GetMethod("GetSetMemberValueDelegateWithType", BindingFlags.Static | BindingFlags.Public)!; MethodInfo getSetMemberValueDelegateWithTypeMi = getSetMemberValueDelegateWithTypeGenericMi.MakeGenericMethod(o.GetType(), memberType); var getSetMemberValueDelegateWithType = (Func<MemberInfo, ReflectionXmlSerializationReaderHelper.SetMemberValueDelegate>)getSetMemberValueDelegateWithTypeMi.CreateDelegate(typeof(Func<MemberInfo, ReflectionXmlSerializationReaderHelper.SetMemberValueDelegate>)); result = getSetMemberValueDelegateWithType(memberInfo); delegateCacheForType[memberName] = result; } } } return (ReflectionXmlSerializationReaderHelper.SetMemberValueDelegate)result; } private object? 
GetMemberValue(object o, MemberInfo memberInfo) { if (memberInfo is PropertyInfo propertyInfo) { return propertyInfo.GetValue(o); } else if (memberInfo is FieldInfo fieldInfo) { return fieldInfo.GetValue(o); } throw new InvalidOperationException(SR.XmlInternalError); } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private bool WriteMemberText(Member anyText) { object? value; MemberMapping anyTextMapping = anyText.Mapping; if ((Reader.NodeType == XmlNodeType.Text || Reader.NodeType == XmlNodeType.CDATA || Reader.NodeType == XmlNodeType.Whitespace || Reader.NodeType == XmlNodeType.SignificantWhitespace)) { TextAccessor text = anyTextMapping.Text!; if (text.Mapping is SpecialMapping special) { if (special.TypeDesc!.Kind == TypeKind.Node) { value = Document.CreateTextNode(Reader.ReadString()); } else { throw new InvalidOperationException(SR.XmlInternalError); } } else { if (anyTextMapping.TypeDesc!.IsArrayLike) { if (text.Mapping!.TypeDesc!.CollapseWhitespace) { value = CollapseWhitespace(Reader.ReadString()); } else { value = Reader.ReadString(); } } else { if (text.Mapping!.TypeDesc == StringTypeDesc || text.Mapping.TypeDesc!.FormatterName == "String") { value = ReadString(null, text.Mapping.TypeDesc.CollapseWhitespace); } else { value = WritePrimitive(text.Mapping, (state) => ((ReflectionXmlSerializationReader)state).Reader.ReadString(), this); } } } anyText.Source!(value); return true; } return false; } private bool IsSequence(Member[] members) { // https://github.com/dotnet/runtime/issues/1402: // Currently the reflection based method treat this kind of type as normal types. // But potentially we can do some optimization for types that have ordered properties. return false; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteMemberElementsIf(Member[] expectedMembers, Member? anyElementMember, UnknownNodeAction elementElseAction, Fixup? fixup = null, CheckTypeSource? checkTypeSource = null) { bool checkType = checkTypeSource != null; bool isSequence = IsSequence(expectedMembers); if (isSequence) { // https://github.com/dotnet/runtime/issues/1402: // Currently the reflection based method treat this kind of type as normal types. // But potentially we can do some optimization for types that have ordered properties. } ElementAccessor? e = null; Member? member = null; bool foundElement = false; int elementIndex = -1; foreach (Member m in expectedMembers) { if (m.Mapping.Xmlns != null) continue; if (m.Mapping.Ignore) continue; if (isSequence && (m.Mapping.IsText || m.Mapping.IsAttribute)) continue; for (int i = 0; i < m.Mapping.Elements!.Length; i++) { ElementAccessor ele = m.Mapping.Elements[i]; string? ns = ele.Form == XmlSchemaForm.Qualified ? ele.Namespace : string.Empty; if (checkType) { Type elementType; if (ele.Mapping is NullableMapping nullableMapping) { TypeDesc td = nullableMapping.BaseMapping!.TypeDesc!; elementType = td.Type!; } else { elementType = ele.Mapping!.TypeDesc!.Type!; } if (elementType.IsAssignableFrom(checkTypeSource!.Type)) { foundElement = true; } } else if (ele.Name == Reader.LocalName && ns == Reader.NamespaceURI) { foundElement = true; } if (foundElement) { e = ele; member = m; elementIndex = i; break; } } if (foundElement) break; } if (foundElement) { if (checkType) { member!.Source!(checkTypeSource!.RefObject!); if (member.FixupIndex >= 0) { fixup!.Ids![member.FixupIndex] = checkTypeSource.Id; } } else { string? ns = e!.Form == XmlSchemaForm.Qualified ? 
e.Namespace : string.Empty; bool isList = member!.Mapping.TypeDesc!.IsArrayLike && !member.Mapping.TypeDesc.IsArray; WriteElement(e, member.Mapping.CheckSpecified == SpecifiedAccessor.ReadWrite, isList && member.Mapping.TypeDesc.IsNullable, member.Mapping.ReadOnly, ns, member.FixupIndex, elementIndex, fixup, member); } } else { if (anyElementMember != null && anyElementMember.Mapping != null) { MemberMapping anyElement = anyElementMember.Mapping; member = anyElementMember; ElementAccessor[] elements = anyElement.Elements!; for (int i = 0; i < elements.Length; i++) { ElementAccessor element = elements[i]; if (element.Any && element.Name.Length == 0) { string? ns = element.Form == XmlSchemaForm.Qualified ? element.Namespace : string.Empty; WriteElement(element, anyElement.CheckSpecified == SpecifiedAccessor.ReadWrite, false, false, ns, fixup: fixup, member: member); break; } } } else { member = null; ProcessUnknownNode(elementElseAction); } } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteElement(ElementAccessor element, bool checkSpecified, bool checkForNull, bool readOnly, string? defaultNamespace, int fixupIndex = -1, int elementIndex = -1, Fixup? fixup = null, Member? member = null) { object? value = null; if (element.Mapping is ArrayMapping arrayMapping) { value = WriteArray(arrayMapping, readOnly, element.IsNullable, defaultNamespace, fixupIndex, fixup, member); } else if (element.Mapping is NullableMapping nullableMapping) { value = WriteNullableMethod(nullableMapping, true, defaultNamespace); } else if (!element.Mapping!.IsSoap && (element.Mapping is PrimitiveMapping)) { if (element.IsNullable && ReadNull()) { if (element.Mapping.TypeDesc!.IsValueType) { value = ReflectionCreateObject(element.Mapping.TypeDesc.Type!); } else { value = null; } } else if ((element.Default != null && element.Default != DBNull.Value && element.Mapping.TypeDesc!.IsValueType) && (Reader.IsEmptyElement)) { Reader.Skip(); } else if (element.Mapping.TypeDesc!.Type == typeof(TimeSpan) && Reader.IsEmptyElement) { Reader.Skip(); value = default(TimeSpan); } else if (element.Mapping.TypeDesc!.Type == typeof(DateTimeOffset) && Reader.IsEmptyElement) { Reader.Skip(); value = default(DateTimeOffset); } else { if (element.Mapping.TypeDesc == QnameTypeDesc) { value = ReadElementQualifiedName(); } else { if (element.Mapping.TypeDesc.FormatterName == "ByteArrayBase64") { value = ToByteArrayBase64(false); } else if (element.Mapping.TypeDesc.FormatterName == "ByteArrayHex") { value = ToByteArrayHex(false); } else { Func<object, string> readFunc = (state) => ((XmlReader)state).ReadElementContentAsString(); value = WritePrimitive(element.Mapping, readFunc, Reader); } } } } else if (element.Mapping is StructMapping || (element.Mapping.IsSoap && element.Mapping is PrimitiveMapping)) { TypeMapping mapping = element.Mapping; if (mapping.IsSoap) { object? rre = fixupIndex >= 0 ? 
ReadReferencingElement(mapping.TypeName, mapping.Namespace, out fixup!.Ids![fixupIndex]) : ReadReferencedElement(mapping.TypeName, mapping.Namespace); if (!mapping.TypeDesc!.IsValueType || rre != null) { value = rre; Referenced(value); } if (fixupIndex >= 0) { if (member == null) { throw new InvalidOperationException(SR.XmlInternalError); } member.Source!(value!); return value; } } else { if (checkForNull && (member!.Source == null && member.ArraySource == null)) { Reader.Skip(); } else { value = WriteStructMethod( mapping: (StructMapping)mapping, isNullable: mapping.TypeDesc!.IsNullable && element.IsNullable, checkType: true, defaultNamespace: defaultNamespace ); } } } else if (element.Mapping is SpecialMapping specialMapping) { switch (specialMapping.TypeDesc!.Kind) { case TypeKind.Node: bool isDoc = specialMapping.TypeDesc.FullName == typeof(XmlDocument).FullName; if (isDoc) { value = ReadXmlDocument(!element.Any); } else { value = ReadXmlNode(!element.Any); } break; case TypeKind.Serializable: SerializableMapping sm = (SerializableMapping)element.Mapping; // check to see if we need to do the derivation bool flag = true; if (sm.DerivedMappings != null) { XmlQualifiedName? tser = GetXsiType(); if (tser == null || QNameEqual(tser, sm.XsiType!.Name, sm.XsiType.Namespace, defaultNamespace)) { } else { flag = false; } } if (flag) { bool isWrappedAny = !element.Any && IsWildcard(sm); value = ReadSerializable((IXmlSerializable)ReflectionCreateObject(sm.TypeDesc!.Type!)!, isWrappedAny); } if (sm.DerivedMappings != null) { // https://github.com/dotnet/runtime/issues/1401: // To Support SpecialMapping Types Having DerivedMappings throw new NotImplementedException("sm.DerivedMappings != null"); //WriteDerivedSerializable(sm, sm, source, isWrappedAny); //WriteUnknownNode("UnknownNode", "null", null, true); } break; default: throw new InvalidOperationException(SR.XmlInternalError); } } else { throw new InvalidOperationException(SR.XmlInternalError); } member?.ChoiceSource?.Invoke(element.Name); if (member?.ArraySource != null) { member?.ArraySource(value!); } else { member?.Source?.Invoke(value!); member?.CheckSpecifiedSource?.Invoke(true); } return value; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private XmlSerializationReadCallback CreateXmlSerializationReadCallback(TypeMapping mapping) { if (mapping is StructMapping structMapping) { [RequiresUnreferencedCode("calls WriteStructMethod")] object? WriteStruct() => WriteStructMethod(structMapping, mapping.TypeDesc!.IsNullable, true, defaultNamespace: null); return WriteStruct; } else if (mapping is EnumMapping enumMapping) { return () => WriteEnumMethodSoap(enumMapping); } else if (mapping is NullableMapping nullableMapping) { [RequiresUnreferencedCode("calls WriteNullableMethod")] object? Wrapper() => WriteNullableMethod(nullableMapping, false, null); return Wrapper; } return DummyReadArrayMethod; } private static void NoopAction(object? o) { } private object? 
DummyReadArrayMethod() { UnknownNode(null); return null; } private static Type GetMemberType(MemberInfo memberInfo) { Type memberType; if (memberInfo is FieldInfo fieldInfo) { memberType = fieldInfo.FieldType; } else if (memberInfo is PropertyInfo propertyInfo) { memberType = propertyInfo.PropertyType; } else { throw new InvalidOperationException(SR.XmlInternalError); } return memberType; } private static bool IsWildcard(SpecialMapping mapping) { if (mapping is SerializableMapping serializableMapping) return serializableMapping.IsAny; return mapping.TypeDesc!.CanBeElementValue; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteArray(ArrayMapping arrayMapping, bool readOnly, bool isNullable, string? defaultNamespace, int fixupIndex = -1, Fixup? fixup = null, Member? member = null) { object? o = null; if (arrayMapping.IsSoap) { object? rre; if (fixupIndex >= 0) { rre = ReadReferencingElement(arrayMapping.TypeName, arrayMapping.Namespace, out fixup!.Ids![fixupIndex]); } else { rre = ReadReferencedElement(arrayMapping.TypeName, arrayMapping.Namespace); } TypeDesc td = arrayMapping.TypeDesc!; if (rre != null) { if (td.IsEnumerable || td.IsCollection) { WriteAddCollectionFixup(member!.GetSource!, member.Source!, rre, td, readOnly); // member.Source has been set at this point. // Setting the source to no-op to avoid setting the // source again. member.Source = NoopAction; } else { if (member == null) { throw new InvalidOperationException(SR.XmlInternalError); } member.Source!(rre); } } o = rre; } else { if (!ReadNull()) { var memberMapping = new MemberMapping() { Elements = arrayMapping.Elements, TypeDesc = arrayMapping.TypeDesc, ReadOnly = readOnly }; Type collectionType = memberMapping.TypeDesc!.Type!; o = ReflectionCreateObject(memberMapping.TypeDesc.Type!); if (memberMapping.ChoiceIdentifier != null) { // https://github.com/dotnet/runtime/issues/1400: // To Support ArrayMapping Types Having ChoiceIdentifier throw new NotImplementedException("memberMapping.ChoiceIdentifier != null"); } var arrayMember = new Member(memberMapping); arrayMember.Collection = new CollectionMember(); arrayMember.ArraySource = (item) => { arrayMember.Collection.Add(item); }; if ((readOnly && o == null) || Reader.IsEmptyElement) { Reader.Skip(); } else { Reader.ReadStartElement(); Reader.MoveToContent(); while (Reader.NodeType != XmlNodeType.EndElement && Reader.NodeType != XmlNodeType.None) { WriteMemberElements(new Member[] { arrayMember }, UnknownNode, UnknownNode, null, null); Reader.MoveToContent(); } ReadEndElement(); } SetCollectionObjectWithCollectionMember(ref o, arrayMember.Collection, collectionType); } } return o; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object WritePrimitive(TypeMapping mapping, Func<object, string> readFunc, object funcState) { if (mapping is EnumMapping enumMapping) { return WriteEnumMethod(enumMapping, readFunc, funcState); } else if (mapping.TypeDesc == StringTypeDesc) { return readFunc(funcState); } else if (mapping.TypeDesc!.FormatterName == "String") { if (mapping.TypeDesc.CollapseWhitespace) { return CollapseWhitespace(readFunc(funcState)); } else { return readFunc(funcState); } } else { if (!mapping.TypeDesc.HasCustomFormatter) { string value = readFunc(funcState); object retObj = mapping.TypeDesc.FormatterName switch { "Boolean" => XmlConvert.ToBoolean(value), "Int32" => XmlConvert.ToInt32(value), "Int16" => XmlConvert.ToInt16(value), "Int64" => XmlConvert.ToInt64(value), "Single" => 
XmlConvert.ToSingle(value), "Double" => XmlConvert.ToDouble(value), "Decimal" => XmlConvert.ToDecimal(value), "Byte" => XmlConvert.ToByte(value), "SByte" => XmlConvert.ToSByte(value), "UInt16" => XmlConvert.ToUInt16(value), "UInt32" => XmlConvert.ToUInt32(value), "UInt64" => XmlConvert.ToUInt64(value), "Guid" => XmlConvert.ToGuid(value), "Char" => XmlConvert.ToChar(value), "TimeSpan" => XmlConvert.ToTimeSpan(value), "DateTimeOffset" => XmlConvert.ToDateTimeOffset(value), _ => throw new InvalidOperationException(SR.Format(SR.XmlInternalErrorDetails, $"unknown FormatterName: {mapping.TypeDesc.FormatterName}")), }; return retObj; } else { string methodName = $"To{mapping.TypeDesc.FormatterName}"; MethodInfo? method = typeof(XmlSerializationReader).GetMethod(methodName, BindingFlags.Static | BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic, new Type[] { typeof(string) }); if (method == null) { throw new InvalidOperationException(SR.Format(SR.XmlInternalErrorDetails, $"unknown FormatterName: {mapping.TypeDesc.FormatterName}")); } return method.Invoke(this, new object[] { readFunc(funcState) })!; } } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteStructMethod(StructMapping mapping, bool isNullable, bool checkType, string? defaultNamespace) { if (mapping.IsSoap) return WriteEncodedStructMethod(mapping); else return WriteLiteralStructMethod(mapping, isNullable, checkType, defaultNamespace); } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteNullableMethod(NullableMapping nullableMapping, bool checkType, string? defaultNamespace) { object? o = Activator.CreateInstance(nullableMapping.TypeDesc!.Type!); if (!ReadNull()) { ElementAccessor element = new ElementAccessor(); element.Mapping = nullableMapping.BaseMapping; element.Any = false; element.IsNullable = nullableMapping.BaseMapping!.TypeDesc!.IsNullable; o = WriteElement(element, false, false, false, defaultNamespace); } return o; } private object WriteEnumMethod(EnumMapping mapping, Func<object, string> readFunc, object funcState) { Debug.Assert(!mapping.IsSoap, "mapping.IsSoap was true. Use WriteEnumMethodSoap for reading SOAP encoded enum value."); string source = readFunc(funcState); return WriteEnumMethod(mapping, source); } private object WriteEnumMethodSoap(EnumMapping mapping) { string source = Reader.ReadElementString(); return WriteEnumMethod(mapping, source); } private object WriteEnumMethod(EnumMapping mapping, string source) { if (mapping.IsFlags) { Hashtable table = WriteHashtable(mapping, mapping.TypeDesc!.Name); return Enum.ToObject(mapping.TypeDesc.Type!, ToEnum(source, table, mapping.TypeDesc.Name)); } else { foreach (ConstantMapping c in mapping.Constants!) { if (string.Equals(c.XmlName, source)) { return Enum.Parse(mapping.TypeDesc!.Type!, c.Name); } } throw CreateUnknownConstantException(source, mapping.TypeDesc!.Type!); } } private Hashtable WriteHashtable(EnumMapping mapping, string name) { var h = new Hashtable(); ConstantMapping[] constants = mapping.Constants!; for (int i = 0; i < constants.Length; i++) { h.Add(constants[i].XmlName, constants[i].Value); } return h; } private object? ReflectionCreateObject( [DynamicallyAccessedMembers(TrimmerConstants.AllMethods)] Type type) { object? obj; if (type.IsArray) { obj = Activator.CreateInstance(type, 32); } else { ConstructorInfo? 
ci = GetDefaultConstructor(type); if (ci != null) { obj = ci.Invoke(Array.Empty<object>()); } else { obj = Activator.CreateInstance(type); } } return obj; } private ConstructorInfo? GetDefaultConstructor( [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.NonPublicConstructors)] Type type) => type.IsValueType ? null : type.GetConstructor(BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.Instance | BindingFlags.DeclaredOnly, null, Type.EmptyTypes, null); [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteEncodedStructMethod(StructMapping structMapping) { if (structMapping.TypeDesc!.IsRoot) return null; Member[]? members = null; if (structMapping.TypeDesc.IsAbstract) { throw CreateAbstractTypeException(structMapping.TypeName!, structMapping.Namespace); } else { object? o = ReflectionCreateObject(structMapping.TypeDesc.Type!); MemberMapping[] mappings = TypeScope.GetSettableMembers(structMapping); members = new Member[mappings.Length]; for (int i = 0; i < mappings.Length; i++) { MemberMapping mapping = mappings[i]; var member = new Member(mapping); TypeDesc td = member.Mapping.TypeDesc!; if (td.IsCollection || td.IsEnumerable) { member.Source = Wrapper; [RequiresUnreferencedCode("Calls WriteAddCollectionFixup")] void Wrapper(object? value) { WriteAddCollectionFixup(o!, member, value!); } } else if (!member.Mapping.ReadOnly) { var setterDelegate = GetSetMemberValueDelegate(o!, member.Mapping.MemberInfo!.Name); member.Source = (value) => setterDelegate(o, value); } else { member.Source = NoopAction; } members[i] = member; } Fixup? fixup = WriteMemberFixupBegin(members, o); UnknownNodeAction unknownNodeAction = (_) => UnknownNode(o); WriteAttributes(members, null, unknownNodeAction, ref o); Reader.MoveToElement(); if (Reader.IsEmptyElement) { Reader.Skip(); return o; } Reader.ReadStartElement(); Reader.MoveToContent(); while (Reader.NodeType != XmlNodeType.EndElement && Reader.NodeType != XmlNodeType.None) { WriteMemberElements(members, UnknownNode, UnknownNode, null, null, fixup: fixup); Reader.MoveToContent(); } ReadEndElement(); return o; } } private Fixup? WriteMemberFixupBegin(Member[] members, object? o) { int fixupCount = 0; foreach (Member member in members) { if (member.Mapping.Elements!.Length == 0) continue; TypeMapping? mapping = member.Mapping.Elements[0].Mapping; if (mapping is StructMapping || mapping is ArrayMapping || mapping is PrimitiveMapping || mapping is NullableMapping) { member.MultiRef = true; member.FixupIndex = fixupCount++; } } Fixup? 
fixup; if (fixupCount > 0) { fixup = new Fixup(o, CreateWriteFixupMethod(members), fixupCount); AddFixup(fixup); } else { fixup = null; } return fixup; } private XmlSerializationFixupCallback CreateWriteFixupMethod(Member[] members) { return (fixupObject) => { var fixup = (Fixup)fixupObject; string[] ids = fixup.Ids!; foreach (Member member in members) { if (member.MultiRef) { int fixupIndex = member.FixupIndex; if (ids[fixupIndex] != null) { var memberValue = GetTarget(ids[fixupIndex]); member.Source!(memberValue); } } } }; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteAddCollectionFixup(object o, Member member, object memberValue) { TypeDesc typeDesc = member.Mapping.TypeDesc!; bool readOnly = member.Mapping.ReadOnly; Func<object?> getSource = () => GetMemberValue(o, member.Mapping.MemberInfo!); var setterDelegate = GetSetMemberValueDelegate(o, member.Mapping.MemberInfo!.Name); Action<object?> setSource = (value) => setterDelegate(o, value); WriteAddCollectionFixup(getSource, setSource, memberValue, typeDesc, readOnly); } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteAddCollectionFixup(Func<object?> getSource, Action<object?> setSource, object memberValue, TypeDesc typeDesc, bool readOnly) { object? memberSource = getSource(); if (memberSource == null) { if (readOnly) { throw CreateReadOnlyCollectionException(typeDesc.CSharpName); } memberSource = ReflectionCreateObject(typeDesc.Type!); setSource(memberSource); } var collectionFixup = new CollectionFixup( memberSource, new XmlSerializationCollectionFixupCallback(GetCreateCollectionOfObjectsCallback(typeDesc.Type!)), memberValue); AddFixup(collectionFixup); return memberSource; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private XmlSerializationCollectionFixupCallback GetCreateCollectionOfObjectsCallback(Type collectionType) { return Wrapper; [RequiresUnreferencedCode("Calls AddObjectsIntoTargetCollection")] void Wrapper(object? collection, object? collectionItems) { if (collectionItems == null) return; if (collection == null) return; var listOfItems = new List<object?>(); if (collectionItems is IEnumerable enumerableItems) { foreach (var item in enumerableItems) { listOfItems.Add(item); } } else { throw new InvalidOperationException(SR.XmlInternalError); } AddObjectsIntoTargetCollection(collection, listOfItems, collectionType); } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteLiteralStructMethod(StructMapping structMapping, bool isNullable, bool checkType, string? defaultNamespace) { XmlQualifiedName? xsiType = checkType ? GetXsiType() : null; bool isNull = false; if (isNullable) { isNull = ReadNull(); } if (checkType) { if (structMapping.TypeDesc!.IsRoot && isNull) { if (xsiType != null) { return ReadTypedNull(xsiType); } else { if (structMapping.TypeDesc.IsValueType) { return ReflectionCreateObject(structMapping.TypeDesc.Type!); } else { return null; } } } object? 
o = null; if (xsiType == null || (!structMapping.TypeDesc.IsRoot && QNameEqual(xsiType, structMapping.TypeName, structMapping.Namespace, defaultNamespace))) { if (structMapping.TypeDesc.IsRoot) { return ReadTypedPrimitive(new XmlQualifiedName(Soap.UrType, XmlReservedNs.NsXs)); } } else if (WriteDerivedTypes(out o, structMapping, xsiType, defaultNamespace, checkType, isNullable)) { return o; } else if (structMapping.TypeDesc.IsRoot && WriteEnumAndArrayTypes(out o, structMapping, xsiType, defaultNamespace)) { return o; } else { if (structMapping.TypeDesc.IsRoot) return ReadTypedPrimitive(xsiType); else throw CreateUnknownTypeException(xsiType); } } if (structMapping.TypeDesc!.IsNullable && isNull) { return null; } else if (structMapping.TypeDesc.IsAbstract) { throw CreateAbstractTypeException(structMapping.TypeName!, structMapping.Namespace); } else { if (structMapping.TypeDesc.Type != null && typeof(XmlSchemaObject).IsAssignableFrom(structMapping.TypeDesc.Type)) { // https://github.com/dotnet/runtime/issues/1399: // To Support Serializing XmlSchemaObject throw new NotImplementedException(nameof(XmlSchemaObject)); } object? o = ReflectionCreateObject(structMapping.TypeDesc.Type!)!; MemberMapping[] mappings = TypeScope.GetSettableMembers(structMapping); MemberMapping? anyText = null; MemberMapping? anyElement = null; Member? anyAttribute = null; Member? anyElementMember = null; Member? anyTextMember = null; bool isSequence = structMapping.HasExplicitSequence(); if (isSequence) { // https://github.com/dotnet/runtime/issues/1402: // Currently the reflection based method treat this kind of type as normal types. // But potentially we can do some optimization for types that have ordered properties. } var allMembersList = new List<Member>(mappings.Length); var allMemberMappingList = new List<MemberMapping>(mappings.Length); for (int i = 0; i < mappings.Length; i++) { MemberMapping mapping = mappings[i]; var member = new Member(mapping); if (mapping.Text != null) { anyText = mapping; } if (mapping.Attribute != null) { member.Source = Wrapper; [RequiresUnreferencedCode("calls SetOrAddValueToMember")] void Wrapper(object? value) { SetOrAddValueToMember(o!, value!, member.Mapping.MemberInfo!); } if (mapping.Attribute.Any) { anyAttribute = member; } } if (!isSequence) { // find anyElement if present. for (int j = 0; j < mapping.Elements!.Length; j++) { if (mapping.Elements[j].Any && (mapping.Elements[j].Name == null || mapping.Elements[j].Name.Length == 0)) { anyElement = mapping; break; } } } else if (mapping.IsParticle && !mapping.IsSequence) { structMapping.FindDeclaringMapping(mapping, out StructMapping? 
declaringMapping, structMapping.TypeName!); throw new InvalidOperationException(SR.Format(SR.XmlSequenceHierarchy, structMapping.TypeDesc.FullName, mapping.Name, declaringMapping!.TypeDesc!.FullName, "Order")); } if (mapping.TypeDesc!.IsArrayLike) { if (member.Source == null && mapping.TypeDesc.IsArrayLike && !(mapping.Elements!.Length == 1 && mapping.Elements[0].Mapping is ArrayMapping)) { member.Source = (item) => { if (member.Collection == null) { member.Collection = new CollectionMember(); } member.Collection.Add(item); }; member.ArraySource = member.Source; } else if (!mapping.TypeDesc.IsArray) { } } if (member.Source == null) { var pi = member.Mapping.MemberInfo as PropertyInfo; if (pi != null && typeof(IList).IsAssignableFrom(pi.PropertyType) && (pi.SetMethod == null || !pi.SetMethod.IsPublic)) { member.Source = (value) => { var getOnlyList = (IList)pi.GetValue(o)!; if (value is IList valueList) { foreach (var v in valueList) { getOnlyList.Add(v); } } else { getOnlyList.Add(value); } }; } else { if (member.Mapping.Xmlns != null) { var xmlSerializerNamespaces = new XmlSerializerNamespaces(); var setMemberValue = GetSetMemberValueDelegate(o!, member.Mapping.Name); setMemberValue(o, xmlSerializerNamespaces); member.XmlnsSource = (ns, name) => { xmlSerializerNamespaces.Add(ns, name); }; } else { var setterDelegate = GetSetMemberValueDelegate(o!, member.Mapping.Name); member.Source = (value) => setterDelegate(o, value); } } } if (member.Mapping.CheckSpecified == SpecifiedAccessor.ReadWrite) { member.CheckSpecifiedSource = Wrapper; [RequiresUnreferencedCode("calls GetType on object")] void Wrapper(object? _) { string specifiedMemberName = $"{member.Mapping.Name}Specified"; MethodInfo? specifiedMethodInfo = o!.GetType().GetMethod($"set_{specifiedMemberName}"); if (specifiedMethodInfo != null) { specifiedMethodInfo.Invoke(o, new object[] { true }); } } } ChoiceIdentifierAccessor? choice = mapping.ChoiceIdentifier; if (choice != null && o != null) { member.ChoiceSource = Wrapper; [RequiresUnreferencedCode("Calls SetOrAddValueToMember")] void Wrapper(object elementNameObject) { string? elementName = elementNameObject as string; foreach (var name in choice.MemberIds!) { if (name == elementName) { object choiceValue = Enum.Parse(choice.Mapping!.TypeDesc!.Type!, name); SetOrAddValueToMember(o, choiceValue, choice.MemberInfo!); break; } } } } allMemberMappingList.Add(mapping); allMembersList.Add(member); if (mapping == anyElement) { anyElementMember = member; } else if (mapping == anyText) { anyTextMember = member; } } Member[] allMembers = allMembersList.ToArray(); UnknownNodeAction unknownNodeAction = (_) => UnknownNode(o); WriteAttributes(allMembers, anyAttribute, unknownNodeAction, ref o); Reader.MoveToElement(); if (Reader.IsEmptyElement) { Reader.Skip(); return o; } Reader.ReadStartElement(); bool IsSequenceAllMembers = IsSequence(allMembers); if (IsSequenceAllMembers) { // https://github.com/dotnet/runtime/issues/1402: // Currently the reflection based method treat this kind of type as normal types. // But potentially we can do some optimization for types that have ordered properties. } WriteMembers(ref o, allMembers, unknownNodeAction, unknownNodeAction, anyElementMember, anyTextMember); foreach (Member member in allMembers) { if (member.Collection != null) { MemberInfo[] memberInfos = o!.GetType().GetMember(member.Mapping.Name); MemberInfo memberInfo = memberInfos[0]; object? 
collection = null; SetCollectionObjectWithCollectionMember(ref collection, member.Collection, member.Mapping.TypeDesc!.Type!); var setMemberValue = GetSetMemberValueDelegate(o, memberInfo.Name); setMemberValue(o, collection); } } ReadEndElement(); return o; } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private bool WriteEnumAndArrayTypes(out object? o, StructMapping mapping, XmlQualifiedName xsiType, string? defaultNamespace) { foreach (var m in _mapping.Scope!.TypeMappings) { if (m is EnumMapping enumMapping) { if (QNameEqual(xsiType, enumMapping.TypeName, enumMapping.Namespace, defaultNamespace)) { Reader.ReadStartElement(); Func<object, string> functor = (state) => { var reader = (ReflectionXmlSerializationReader)state; return reader.CollapseWhitespace(reader.Reader.ReadString()); }; o = WriteEnumMethod(enumMapping, functor, this); ReadEndElement(); return true; } continue; } if (m is ArrayMapping arrayMapping) { if (QNameEqual(xsiType, arrayMapping.TypeName, arrayMapping.Namespace, defaultNamespace)) { o = WriteArray(arrayMapping, false, false, defaultNamespace); return true; } continue; } } o = null; return false; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private bool WriteDerivedTypes(out object? o, StructMapping mapping, XmlQualifiedName xsiType, string? defaultNamespace, bool checkType, bool isNullable) { for (StructMapping? derived = mapping.DerivedMappings; derived != null; derived = derived.NextDerivedMapping) { if (QNameEqual(xsiType, derived.TypeName, derived.Namespace, defaultNamespace)) { o = WriteStructMethod(derived, isNullable, checkType, defaultNamespace); return true; } if (WriteDerivedTypes(out o, derived, xsiType, defaultNamespace, checkType, isNullable)) { return true; } } o = null; return false; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteAttributes(Member[] members, Member? anyAttribute, UnknownNodeAction elseCall, ref object? o) { Member? xmlnsMember = null; var attributes = new List<AttributeAccessor>(); foreach (Member member in members) { if (member.Mapping.Xmlns != null) { xmlnsMember = member; break; } } while (Reader.MoveToNextAttribute()) { bool memberFound = false; foreach (Member member in members) { if (member.Mapping.Xmlns != null || member.Mapping.Ignore) { continue; } AttributeAccessor? attribute = member.Mapping.Attribute; if (attribute == null) continue; if (attribute.Any) continue; attributes.Add(attribute); if (attribute.IsSpecialXmlNamespace) { memberFound = XmlNodeEqual(Reader, attribute.Name, XmlReservedNs.NsXml); } else { memberFound = XmlNodeEqual(Reader, attribute.Name, attribute.Form == XmlSchemaForm.Qualified ? attribute.Namespace : string.Empty); } if (memberFound) { WriteAttribute(member); memberFound = true; break; } } if (memberFound) { continue; } bool flag2 = false; if (xmlnsMember != null) { if (IsXmlnsAttribute(Reader.Name)) { Debug.Assert(xmlnsMember.XmlnsSource != null, "Xmlns member's source was not set."); xmlnsMember.XmlnsSource(Reader.Name.Length == 5 ? string.Empty : Reader.LocalName, Reader.Value); } else { flag2 = true; } } else if (!IsXmlnsAttribute(Reader.Name)) { flag2 = true; } if (flag2) { if (anyAttribute != null) { var attr = (Document.ReadNode(Reader) as XmlAttribute)!; ParseWsdlArrayType(attr); WriteAttribute(anyAttribute, attr); } else { elseCall(o); } } } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteAttribute(Member member, object? 
attr = null) { AttributeAccessor attribute = member.Mapping.Attribute!; object? value = null; if (attribute.Mapping is SpecialMapping special) { if (special.TypeDesc!.Kind == TypeKind.Attribute) { value = attr; } else if (special.TypeDesc.CanBeAttributeValue) { // https://github.com/dotnet/runtime/issues/1398: // To Support special.TypeDesc.CanBeAttributeValue == true throw new NotImplementedException("special.TypeDesc.CanBeAttributeValue"); } else throw new InvalidOperationException(SR.XmlInternalError); } else { if (attribute.IsList) { string listValues = Reader.Value; string[] vals = listValues.Split(null); Array arrayValue = Array.CreateInstance(member.Mapping.TypeDesc!.Type!.GetElementType()!, vals.Length); for (int i = 0; i < vals.Length; i++) { arrayValue.SetValue(WritePrimitive(attribute.Mapping!, (state) => ((string[])state)[i], vals), i); } value = arrayValue; } else { value = WritePrimitive(attribute.Mapping!, (state) => ((XmlReader)state).Value, Reader); } } member.Source!(value); if (member.Mapping.CheckSpecified == SpecifiedAccessor.ReadWrite) { member.CheckSpecifiedSource?.Invoke(null); } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void SetOrAddValueToMember(object o, object value, MemberInfo memberInfo) { Type memberType = GetMemberType(memberInfo); if (memberType == value.GetType()) { var setMemberValue = GetSetMemberValueDelegate(o, memberInfo.Name); setMemberValue(o, value); } else if (memberType.IsArray) { AddItemInArrayMember(o, memberInfo, memberType, value); } else { var setMemberValue = GetSetMemberValueDelegate(o, memberInfo.Name); setMemberValue(o, value); } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void AddItemInArrayMember(object o, MemberInfo memberInfo, Type memberType, object item) { var currentArray = (Array?)GetMemberValue(o, memberInfo); int length; if (currentArray == null) { length = 0; } else { length = currentArray.Length; } var newArray = Array.CreateInstance(memberType.GetElementType()!, length + 1); if (currentArray != null) { Array.Copy(currentArray, newArray, length); } newArray.SetValue(item, length); var setMemberValue = GetSetMemberValueDelegate(o, memberInfo.Name); setMemberValue(o, newArray); } // WriteXmlNodeEqual private bool XmlNodeEqual(XmlReader source, string name, string? ns) { return source.LocalName == name && string.Equals(source.NamespaceURI, ns); } private bool QNameEqual(XmlQualifiedName xsiType, string? name, string? ns, string? defaultNamespace) { return xsiType.Name == name && string.Equals(xsiType.Namespace, defaultNamespace); } private void CreateUnknownNodeException(object? o) { CreateUnknownNodeException(); } internal sealed class CollectionMember : List<object?> { } internal sealed class Member { public MemberMapping Mapping; public CollectionMember? Collection; public int FixupIndex = -1; public bool MultiRef; public Action<object?>? Source; public Func<object?>? GetSource; public Action<object>? ArraySource; public Action<object?>? CheckSpecifiedSource; public Action<object>? ChoiceSource; public Action<string, string>? XmlnsSource; public Member(MemberMapping mapping) { Mapping = mapping; } } internal sealed class CheckTypeSource { public string? Id { get; set; } public bool IsObject { get; set; } public Type? Type { get; set; } public object? RefObject { get; set; } } internal sealed class ObjectHolder { public object? Object; } } internal static class ReflectionXmlSerializationReaderHelper { public delegate void SetMemberValueDelegate(object? 
o, object? val); public static SetMemberValueDelegate GetSetMemberValueDelegateWithType<TObj, TParam>(MemberInfo memberInfo) { if (typeof(TObj).IsValueType) { if (memberInfo is PropertyInfo propInfo) { return delegate (object? o, object? p) { propInfo.SetValue(o, p); }; } else if (memberInfo is FieldInfo fieldInfo) { return delegate (object? o, object? p) { fieldInfo.SetValue(o, p); }; } throw new InvalidOperationException(SR.XmlInternalError); } else { Action<TObj, TParam>? setTypedDelegate = null; if (memberInfo is PropertyInfo propInfo) { var setMethod = propInfo.GetSetMethod(true); if (setMethod == null) { return delegate (object? o, object? p) { // Maintain the same failure behavior as non-cached delegate propInfo.SetValue(o, p); }; } setTypedDelegate = (Action<TObj, TParam>)setMethod.CreateDelegate(typeof(Action<TObj, TParam>)); } else if (memberInfo is FieldInfo fieldInfo) { var objectParam = Expression.Parameter(typeof(TObj)); var valueParam = Expression.Parameter(typeof(TParam)); var fieldExpr = Expression.Field(objectParam, fieldInfo); var assignExpr = Expression.Assign(fieldExpr, valueParam); setTypedDelegate = Expression.Lambda<Action<TObj, TParam>>(assignExpr, objectParam, valueParam).Compile(); } return delegate (object? o, object? p) { setTypedDelegate!((TObj)o!, (TParam)p!); }; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections; using System.Collections.Concurrent; using System.Collections.Generic; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Linq.Expressions; using System.Reflection; using System.Runtime.CompilerServices; using System.Xml.Extensions; using System.Xml.Schema; // UnconditionalSuppressMessage that specify a Target need to be at the assembly or module level for now. Also, // they won't consider Target unless you also specify Scope to be either "member" or "type" [assembly: UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:RequiresUnreferencedCode", Target = "M:System.Xml.Serialization.ReflectionXmlSerializationReader.#cctor", Scope = "member", Justification = "The reason why this warns is because the two static properties call GetTypeDesc() which internally will call " + "ImportTypeDesc() when the passed in type is not considered a primitive type. That said, for both properties here we are passing in string " + "and XmlQualifiedName which are considered primitive, so they are trim safe.")] namespace System.Xml.Serialization { internal delegate void UnknownNodeAction(object? o); internal sealed class ReflectionXmlSerializationReader : XmlSerializationReader { private readonly XmlMapping _mapping; // Suppressed for the linker by the assembly-level UnconditionalSuppressMessageAttribute // https://github.com/dotnet/linker/issues/2648 #pragma warning disable IL2026 internal static TypeDesc StringTypeDesc { get; set; } = (new TypeScope()).GetTypeDesc(typeof(string)); internal static TypeDesc QnameTypeDesc { get; set; } = (new TypeScope()).GetTypeDesc(typeof(XmlQualifiedName)); #pragma warning restore IL2026 public ReflectionXmlSerializationReader(XmlMapping mapping, XmlReader xmlReader, XmlDeserializationEvents events, string? encodingStyle) { Init(xmlReader, events, encodingStyle, tempAssembly: null); _mapping = mapping; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] protected override void InitCallbacks() { TypeScope scope = _mapping.Scope!; foreach (TypeMapping mapping in scope.TypeMappings) { if (mapping.IsSoap && (mapping is StructMapping || mapping is EnumMapping || mapping is ArrayMapping || mapping is NullableMapping) && !mapping.TypeDesc!.IsRoot) { AddReadCallback( mapping.TypeName!, mapping.Namespace!, mapping.TypeDesc.Type!, CreateXmlSerializationReadCallback(mapping)); } } } protected override void InitIDs() { } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] public object? 
ReadObject() { XmlMapping xmlMapping = _mapping; if (!xmlMapping.IsReadable) return null; if (!xmlMapping.GenerateSerializer) throw new ArgumentException(SR.Format(SR.XmlInternalError, "xmlMapping")); if (xmlMapping is XmlTypeMapping xmlTypeMapping) { return GenerateTypeElement(xmlTypeMapping); } else if (xmlMapping is XmlMembersMapping xmlMembersMapping) { return GenerateMembersElement(xmlMembersMapping); } else { throw new ArgumentException(SR.Format(SR.XmlInternalError, "xmlMapping")); } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object GenerateMembersElement(XmlMembersMapping xmlMembersMapping) { if (xmlMembersMapping.Accessor.IsSoap) { return GenerateEncodedMembersElement(xmlMembersMapping); } else { return GenerateLiteralMembersElement(xmlMembersMapping); } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object GenerateLiteralMembersElement(XmlMembersMapping xmlMembersMapping) { ElementAccessor element = xmlMembersMapping.Accessor; MemberMapping[] mappings = ((MembersMapping)element.Mapping!).Members!; bool hasWrapperElement = ((MembersMapping)element.Mapping).HasWrapperElement; Reader.MoveToContent(); object[] p = new object[mappings.Length]; InitializeValueTypes(p, mappings); if (hasWrapperElement) { string elementName = element.Name; string elementNs = element.Form == XmlSchemaForm.Qualified ? element.Namespace! : string.Empty; Reader.MoveToContent(); while (Reader.NodeType != XmlNodeType.EndElement && Reader.NodeType != XmlNodeType.None) { if (Reader.IsStartElement(element.Name, elementNs)) { if (!GenerateLiteralMembersElementInternal(mappings, hasWrapperElement, p)) { continue; } ReadEndElement(); } else { UnknownNode(null, $"{elementNs}:{elementName}"); } Reader.MoveToContent(); } } else { GenerateLiteralMembersElementInternal(mappings, hasWrapperElement, p); } return p; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private bool GenerateLiteralMembersElementInternal(MemberMapping[] mappings, bool hasWrapperElement, object?[] p) { Member? anyText = null; Member? anyElement = null; Member? 
anyAttribute = null; var membersList = new List<Member>(); var textOrArrayMembersList = new List<Member>(); var attributeMembersList = new List<Member>(); for (int i = 0; i < mappings.Length; i++) { int index = i; MemberMapping mapping = mappings[index]; Action<object?> source = (o) => p[index] = o; Member member = new Member(mapping); Member anyMember = new Member(mapping); if (mapping.Xmlns != null) { var xmlns = new XmlSerializerNamespaces(); p[index] = xmlns; member.XmlnsSource = (ns, name) => xmlns.Add(ns, name); } member.Source = source; anyMember.Source = source; if (mapping.CheckSpecified == SpecifiedAccessor.ReadWrite) { string nameSpecified = $"{mapping.Name}Specified"; for (int j = 0; j < mappings.Length; j++) { if (mappings[j].Name == nameSpecified) { int indexJ = j; member.CheckSpecifiedSource = (o) => p[indexJ] = o; } } } bool foundAnyElement = false; if (mapping.Text != null) { anyText = anyMember; } if (mapping.Attribute != null && mapping.Attribute.Any) { anyMember.Collection = new CollectionMember(); anyMember.ArraySource = anyMember.Source; anyMember.Source = (item) => { anyMember.Collection.Add(item); }; anyAttribute = anyMember; } if (mapping.Attribute != null || mapping.Xmlns != null) { attributeMembersList.Add(member); } else if (mapping.Text != null) { textOrArrayMembersList.Add(member); } if (!mapping.IsSequence) { for (int j = 0; j < mapping.Elements!.Length; j++) { if (mapping.Elements[j].Any && mapping.Elements[j].Name.Length == 0) { anyElement = anyMember; if (mapping.Attribute == null && mapping.Text == null) { anyMember.Collection = new CollectionMember(); anyMember.ArraySource = (item) => { anyMember.Collection.Add(item); }; textOrArrayMembersList.Add(anyMember); } foundAnyElement = true; break; } } } if (mapping.Attribute != null || mapping.Text != null || foundAnyElement) { membersList.Add(anyMember); } else if (mapping.TypeDesc!.IsArrayLike && !(mapping.Elements!.Length == 1 && mapping.Elements[0].Mapping is ArrayMapping)) { anyMember.Collection = new CollectionMember(); anyMember.ArraySource = (item) => { anyMember.Collection.Add(item); }; membersList.Add(anyMember); textOrArrayMembersList.Add(anyMember); } else { membersList.Add(member); } } Member[] members = membersList.ToArray(); Member[] textOrArrayMembers = textOrArrayMembersList.ToArray(); if (members.Length > 0 && members[0].Mapping.IsReturnValue) IsReturnValue = true; if (attributeMembersList.Count > 0) { Member[] attributeMembers = attributeMembersList.ToArray(); object? tempObject = null; WriteAttributes(attributeMembers, anyAttribute, UnknownNode, ref tempObject); Reader.MoveToElement(); } if (hasWrapperElement) { if (Reader.IsEmptyElement) { Reader.Skip(); Reader.MoveToContent(); return false; } Reader.ReadStartElement(); } Reader.MoveToContent(); while (Reader.NodeType != XmlNodeType.EndElement && Reader.NodeType != XmlNodeType.None) { WriteMemberElements(members, UnknownNode, UnknownNode, anyElement, anyText, null); Reader.MoveToContent(); } foreach (Member member in textOrArrayMembers) { object? value = null; SetCollectionObjectWithCollectionMember(ref value, member.Collection!, member.Mapping.TypeDesc!.Type!); member.Source!(value); } if (anyAttribute != null) { object? 
value = null; SetCollectionObjectWithCollectionMember(ref value, anyAttribute.Collection!, anyAttribute.Mapping.TypeDesc!.Type!); anyAttribute.ArraySource!(value); } return true; } private void InitializeValueTypes(object?[] p, MemberMapping[] mappings) { for (int i = 0; i < mappings.Length; i++) { if (!mappings[i].TypeDesc!.IsValueType) continue; if (mappings[i].TypeDesc!.IsOptionalValue && mappings[i].TypeDesc!.BaseTypeDesc!.UseReflection) { p[i] = null; } else { p[i] = ReflectionCreateObject(mappings[i].TypeDesc!.Type!); } } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object GenerateEncodedMembersElement(XmlMembersMapping xmlMembersMapping) { ElementAccessor element = xmlMembersMapping.Accessor; var membersMapping = (MembersMapping)element.Mapping!; MemberMapping[] mappings = membersMapping.Members!; bool hasWrapperElement = membersMapping.HasWrapperElement; bool writeAccessors = membersMapping.WriteAccessors; Reader.MoveToContent(); object?[] p = new object[mappings.Length]; InitializeValueTypes(p, mappings); bool isEmptyWrapper = true; if (hasWrapperElement) { Reader.MoveToContent(); while (Reader.NodeType == XmlNodeType.Element) { string? root = Reader.GetAttribute("root", Soap.Encoding); if (root == null || XmlConvert.ToBoolean(root)) break; ReadReferencedElement(); Reader.MoveToContent(); } if (membersMapping.ValidateRpcWrapperElement) { string name = element.Name; string? ns = element.Form == XmlSchemaForm.Qualified ? element.Namespace : string.Empty; if (!XmlNodeEqual(Reader, name, ns)) { throw CreateUnknownNodeException(); } } isEmptyWrapper = Reader.IsEmptyElement; Reader.ReadStartElement(); } Member[] members = new Member[mappings.Length]; for (int i = 0; i < mappings.Length; i++) { int index = i; MemberMapping mapping = mappings[index]; var member = new Member(mapping); member.Source = (value) => p[index] = value; members[index] = member; if (mapping.CheckSpecified == SpecifiedAccessor.ReadWrite) { string nameSpecified = $"{mapping.Name}Specified"; for (int j = 0; j < mappings.Length; j++) { if (mappings[j].Name == nameSpecified) { int indexOfSpecifiedMember = j; member.CheckSpecifiedSource = (value) => p[indexOfSpecifiedMember] = value; break; } } } } Fixup? fixup = WriteMemberFixupBegin(members, p); if (members.Length > 0 && members[0].Mapping.IsReturnValue) { IsReturnValue = true; } List<CheckTypeSource>? checkTypeHrefSource = null; if (!hasWrapperElement && !writeAccessors) { checkTypeHrefSource = new List<CheckTypeSource>(); } Reader.MoveToContent(); while (Reader.NodeType != XmlNodeType.EndElement && Reader.NodeType != XmlNodeType.None) { UnknownNodeAction unrecognizedElementSource; if (checkTypeHrefSource == null) { unrecognizedElementSource = (_) => UnknownNode(p); } else { unrecognizedElementSource = Wrapper; [RequiresUnreferencedCode("calls ReadReferencedElement")] void Wrapper(object? _) { if (Reader.GetAttribute("id", null) != null) { ReadReferencedElement(); } else { UnknownNode(p); } } } WriteMemberElements(members, unrecognizedElementSource, (_) => UnknownNode(p), null, null, fixup: fixup, checkTypeHrefsSource: checkTypeHrefSource); Reader.MoveToContent(); } if (!isEmptyWrapper) { ReadEndElement(); } if (checkTypeHrefSource != null) { foreach (CheckTypeSource currentySource in checkTypeHrefSource) { bool isReferenced = true; bool isObject = currentySource.IsObject; object? refObj = isObject ? 
currentySource.RefObject : GetTarget((string)currentySource.RefObject!); if (refObj == null) { continue; } var checkTypeSource = new CheckTypeSource() { RefObject = refObj, Type = refObj.GetType(), Id = null }; WriteMemberElementsIf(members, null, (_) => isReferenced = false, fixup, checkTypeSource); if (isObject && isReferenced) { Referenced(refObj); } } } ReadReferencedElements(); return p; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? GenerateTypeElement(XmlTypeMapping xmlTypeMapping) { ElementAccessor element = xmlTypeMapping.Accessor; TypeMapping mapping = element.Mapping!; Reader.MoveToContent(); var memberMapping = new MemberMapping(); memberMapping.TypeDesc = mapping.TypeDesc; memberMapping.Elements = new ElementAccessor[] { element }; object? o = null; var holder = new ObjectHolder(); var member = new Member(memberMapping); member.Source = (value) => holder.Object = value; member.GetSource = () => holder.Object; UnknownNodeAction elementElseAction = CreateUnknownNodeException; UnknownNodeAction elseAction = UnknownNode; WriteMemberElements(new Member[] { member }, elementElseAction, elseAction, element.Any ? member : null, null); o = holder.Object; if (element.IsSoap) { Referenced(o); ReadReferencedElements(); } return o; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteMemberElements(Member[] expectedMembers, UnknownNodeAction elementElseAction, UnknownNodeAction elseAction, Member? anyElement, Member? anyText, Fixup? fixup = null, List<CheckTypeSource>? checkTypeHrefsSource = null) { bool checkType = checkTypeHrefsSource != null; if (Reader.NodeType == XmlNodeType.Element) { if (checkType) { if (Reader.GetAttribute("root", Soap.Encoding) == "0") { elementElseAction(null); return; } WriteMemberElementsCheckType(checkTypeHrefsSource!); } else { WriteMemberElementsIf(expectedMembers, anyElement, elementElseAction, fixup: fixup); } } else if (anyText != null && anyText.Mapping != null && WriteMemberText(anyText)) { } else { ProcessUnknownNode(elseAction); } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteMemberElementsCheckType(List<CheckTypeSource> checkTypeHrefsSource) { object? RefElememnt = ReadReferencingElement(null, null, true, out string? refElemId); var source = new CheckTypeSource(); if (refElemId != null) { source.RefObject = refElemId; source.IsObject = false; checkTypeHrefsSource.Add(source); } else if (RefElememnt != null) { source.RefObject = RefElememnt; source.IsObject = true; checkTypeHrefsSource.Add(source); } } private void ProcessUnknownNode(UnknownNodeAction action) { action?.Invoke(null); } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteMembers(ref object? o, Member[] members, UnknownNodeAction elementElseAction, UnknownNodeAction elseAction, Member? anyElement, Member? anyText) { Reader.MoveToContent(); while (Reader.NodeType != XmlNodeType.EndElement && Reader.NodeType != XmlNodeType.None) { WriteMemberElements(members, elementElseAction, elseAction, anyElement, anyText); Reader.MoveToContent(); } } private void SetCollectionObjectWithCollectionMember([NotNull] ref object? 
collection, CollectionMember collectionMember, [DynamicallyAccessedMembers(TrimmerConstants.AllMethods)] Type collectionType) { if (collectionType.IsArray) { Array a; if (collection is Array currentArray && currentArray.Length == collectionMember.Count) { a = currentArray; } else { Type elementType = collectionType.GetElementType()!; a = Array.CreateInstance(elementType, collectionMember.Count); } for (int i = 0; i < collectionMember.Count; i++) { a.SetValue(collectionMember[i], i); } collection = a; } else { if (collection == null) { collection = ReflectionCreateObject(collectionType)!; } AddObjectsIntoTargetCollection(collection, collectionMember, collectionType); } } private static void AddObjectsIntoTargetCollection(object targetCollection, List<object?> sourceCollection, [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicMethods)] Type targetCollectionType) { if (targetCollection is IList targetList) { foreach (object? item in sourceCollection) { targetList.Add(item); } } else { MethodInfo? addMethod = targetCollectionType.GetMethod("Add"); if (addMethod == null) { throw new InvalidOperationException(SR.XmlInternalError); } object?[] arguments = new object?[1]; foreach (object? item in sourceCollection) { arguments[0] = item; addMethod.Invoke(targetCollection, arguments); } } } private static readonly ContextAwareTables<Hashtable> s_setMemberValueDelegateCache = new ContextAwareTables<Hashtable>(); [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private static ReflectionXmlSerializationReaderHelper.SetMemberValueDelegate GetSetMemberValueDelegate(object o, string memberName) { Debug.Assert(o != null, "Object o should not be null"); Debug.Assert(!string.IsNullOrEmpty(memberName), "memberName must have a value"); Type type = o.GetType(); var delegateCacheForType = s_setMemberValueDelegateCache.GetOrCreateValue(type, () => new Hashtable()); var result = delegateCacheForType[memberName]; if (result == null) { lock (delegateCacheForType) { if ((result = delegateCacheForType[memberName]) == null) { MemberInfo memberInfo = ReflectionXmlSerializationHelper.GetEffectiveSetInfo(o.GetType(), memberName); Debug.Assert(memberInfo != null, "memberInfo could not be retrieved"); Type memberType; if (memberInfo is PropertyInfo propInfo) { memberType = propInfo.PropertyType; } else if (memberInfo is FieldInfo fieldInfo) { memberType = fieldInfo.FieldType; } else { throw new InvalidOperationException(SR.XmlInternalError); } MethodInfo getSetMemberValueDelegateWithTypeGenericMi = typeof(ReflectionXmlSerializationReaderHelper).GetMethod("GetSetMemberValueDelegateWithType", BindingFlags.Static | BindingFlags.Public)!; MethodInfo getSetMemberValueDelegateWithTypeMi = getSetMemberValueDelegateWithTypeGenericMi.MakeGenericMethod(o.GetType(), memberType); var getSetMemberValueDelegateWithType = (Func<MemberInfo, ReflectionXmlSerializationReaderHelper.SetMemberValueDelegate>)getSetMemberValueDelegateWithTypeMi.CreateDelegate(typeof(Func<MemberInfo, ReflectionXmlSerializationReaderHelper.SetMemberValueDelegate>)); result = getSetMemberValueDelegateWithType(memberInfo); delegateCacheForType[memberName] = result; } } } return (ReflectionXmlSerializationReaderHelper.SetMemberValueDelegate)result; } private object? 
GetMemberValue(object o, MemberInfo memberInfo) { if (memberInfo is PropertyInfo propertyInfo) { return propertyInfo.GetValue(o); } else if (memberInfo is FieldInfo fieldInfo) { return fieldInfo.GetValue(o); } throw new InvalidOperationException(SR.XmlInternalError); } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private bool WriteMemberText(Member anyText) { object? value; MemberMapping anyTextMapping = anyText.Mapping; if ((Reader.NodeType == XmlNodeType.Text || Reader.NodeType == XmlNodeType.CDATA || Reader.NodeType == XmlNodeType.Whitespace || Reader.NodeType == XmlNodeType.SignificantWhitespace)) { TextAccessor text = anyTextMapping.Text!; if (text.Mapping is SpecialMapping special) { if (special.TypeDesc!.Kind == TypeKind.Node) { value = Document.CreateTextNode(Reader.ReadString()); } else { throw new InvalidOperationException(SR.XmlInternalError); } } else { if (anyTextMapping.TypeDesc!.IsArrayLike) { if (text.Mapping!.TypeDesc!.CollapseWhitespace) { value = CollapseWhitespace(Reader.ReadString()); } else { value = Reader.ReadString(); } } else { if (text.Mapping!.TypeDesc == StringTypeDesc || text.Mapping.TypeDesc!.FormatterName == "String") { value = ReadString(null, text.Mapping.TypeDesc.CollapseWhitespace); } else { value = WritePrimitive(text.Mapping, (state) => ((ReflectionXmlSerializationReader)state).Reader.ReadString(), this); } } } anyText.Source!(value); return true; } return false; } private bool IsSequence(Member[] members) { // https://github.com/dotnet/runtime/issues/1402: // Currently the reflection based method treat this kind of type as normal types. // But potentially we can do some optimization for types that have ordered properties. return false; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteMemberElementsIf(Member[] expectedMembers, Member? anyElementMember, UnknownNodeAction elementElseAction, Fixup? fixup = null, CheckTypeSource? checkTypeSource = null) { bool checkType = checkTypeSource != null; bool isSequence = IsSequence(expectedMembers); if (isSequence) { // https://github.com/dotnet/runtime/issues/1402: // Currently the reflection based method treat this kind of type as normal types. // But potentially we can do some optimization for types that have ordered properties. } ElementAccessor? e = null; Member? member = null; bool foundElement = false; int elementIndex = -1; foreach (Member m in expectedMembers) { if (m.Mapping.Xmlns != null) continue; if (m.Mapping.Ignore) continue; if (isSequence && (m.Mapping.IsText || m.Mapping.IsAttribute)) continue; for (int i = 0; i < m.Mapping.Elements!.Length; i++) { ElementAccessor ele = m.Mapping.Elements[i]; string? ns = ele.Form == XmlSchemaForm.Qualified ? ele.Namespace : string.Empty; if (checkType) { Type elementType; if (ele.Mapping is NullableMapping nullableMapping) { TypeDesc td = nullableMapping.BaseMapping!.TypeDesc!; elementType = td.Type!; } else { elementType = ele.Mapping!.TypeDesc!.Type!; } if (elementType.IsAssignableFrom(checkTypeSource!.Type)) { foundElement = true; } } else if (ele.Name == Reader.LocalName && ns == Reader.NamespaceURI) { foundElement = true; } if (foundElement) { e = ele; member = m; elementIndex = i; break; } } if (foundElement) break; } if (foundElement) { if (checkType) { member!.Source!(checkTypeSource!.RefObject!); if (member.FixupIndex >= 0) { fixup!.Ids![member.FixupIndex] = checkTypeSource.Id; } } else { string? ns = e!.Form == XmlSchemaForm.Qualified ? 
e.Namespace : string.Empty; bool isList = member!.Mapping.TypeDesc!.IsArrayLike && !member.Mapping.TypeDesc.IsArray; WriteElement(e, member.Mapping.CheckSpecified == SpecifiedAccessor.ReadWrite, isList && member.Mapping.TypeDesc.IsNullable, member.Mapping.ReadOnly, ns, member.FixupIndex, elementIndex, fixup, member); } } else { if (anyElementMember != null && anyElementMember.Mapping != null) { MemberMapping anyElement = anyElementMember.Mapping; member = anyElementMember; ElementAccessor[] elements = anyElement.Elements!; for (int i = 0; i < elements.Length; i++) { ElementAccessor element = elements[i]; if (element.Any && element.Name.Length == 0) { string? ns = element.Form == XmlSchemaForm.Qualified ? element.Namespace : string.Empty; WriteElement(element, anyElement.CheckSpecified == SpecifiedAccessor.ReadWrite, false, false, ns, fixup: fixup, member: member); break; } } } else { member = null; ProcessUnknownNode(elementElseAction); } } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteElement(ElementAccessor element, bool checkSpecified, bool checkForNull, bool readOnly, string? defaultNamespace, int fixupIndex = -1, int elementIndex = -1, Fixup? fixup = null, Member? member = null) { object? value = null; if (element.Mapping is ArrayMapping arrayMapping) { value = WriteArray(arrayMapping, readOnly, element.IsNullable, defaultNamespace, fixupIndex, fixup, member); } else if (element.Mapping is NullableMapping nullableMapping) { value = WriteNullableMethod(nullableMapping, true, defaultNamespace); } else if (!element.Mapping!.IsSoap && (element.Mapping is PrimitiveMapping)) { if (element.IsNullable && ReadNull()) { if (element.Mapping.TypeDesc!.IsValueType) { value = ReflectionCreateObject(element.Mapping.TypeDesc.Type!); } else { value = null; } } else if ((element.Default != null && element.Default != DBNull.Value && element.Mapping.TypeDesc!.IsValueType) && (Reader.IsEmptyElement)) { Reader.Skip(); } else if (element.Mapping.TypeDesc!.Type == typeof(TimeSpan) && Reader.IsEmptyElement) { Reader.Skip(); value = default(TimeSpan); } else if (element.Mapping.TypeDesc!.Type == typeof(DateTimeOffset) && Reader.IsEmptyElement) { Reader.Skip(); value = default(DateTimeOffset); } else { if (element.Mapping.TypeDesc == QnameTypeDesc) { value = ReadElementQualifiedName(); } else { if (element.Mapping.TypeDesc.FormatterName == "ByteArrayBase64") { value = ToByteArrayBase64(false); } else if (element.Mapping.TypeDesc.FormatterName == "ByteArrayHex") { value = ToByteArrayHex(false); } else { Func<object, string> readFunc = (state) => ((XmlReader)state).ReadElementContentAsString(); value = WritePrimitive(element.Mapping, readFunc, Reader); } } } } else if (element.Mapping is StructMapping || (element.Mapping.IsSoap && element.Mapping is PrimitiveMapping)) { TypeMapping mapping = element.Mapping; if (mapping.IsSoap) { object? rre = fixupIndex >= 0 ? 
ReadReferencingElement(mapping.TypeName, mapping.Namespace, out fixup!.Ids![fixupIndex]) : ReadReferencedElement(mapping.TypeName, mapping.Namespace); if (!mapping.TypeDesc!.IsValueType || rre != null) { value = rre; Referenced(value); } if (fixupIndex >= 0) { if (member == null) { throw new InvalidOperationException(SR.XmlInternalError); } member.Source!(value!); return value; } } else { if (checkForNull && (member!.Source == null && member.ArraySource == null)) { Reader.Skip(); } else { value = WriteStructMethod( mapping: (StructMapping)mapping, isNullable: mapping.TypeDesc!.IsNullable && element.IsNullable, checkType: true, defaultNamespace: defaultNamespace ); } } } else if (element.Mapping is SpecialMapping specialMapping) { switch (specialMapping.TypeDesc!.Kind) { case TypeKind.Node: bool isDoc = specialMapping.TypeDesc.FullName == typeof(XmlDocument).FullName; if (isDoc) { value = ReadXmlDocument(!element.Any); } else { value = ReadXmlNode(!element.Any); } break; case TypeKind.Serializable: SerializableMapping sm = (SerializableMapping)element.Mapping; // check to see if we need to do the derivation bool flag = true; if (sm.DerivedMappings != null) { XmlQualifiedName? tser = GetXsiType(); if (tser == null || QNameEqual(tser, sm.XsiType!.Name, sm.XsiType.Namespace, defaultNamespace)) { } else { flag = false; } } if (flag) { bool isWrappedAny = !element.Any && IsWildcard(sm); value = ReadSerializable((IXmlSerializable)ReflectionCreateObject(sm.TypeDesc!.Type!)!, isWrappedAny); } if (sm.DerivedMappings != null) { // https://github.com/dotnet/runtime/issues/1401: // To Support SpecialMapping Types Having DerivedMappings throw new NotImplementedException("sm.DerivedMappings != null"); //WriteDerivedSerializable(sm, sm, source, isWrappedAny); //WriteUnknownNode("UnknownNode", "null", null, true); } break; default: throw new InvalidOperationException(SR.XmlInternalError); } } else { throw new InvalidOperationException(SR.XmlInternalError); } member?.ChoiceSource?.Invoke(element.Name); if (member?.ArraySource != null) { member?.ArraySource(value!); } else { member?.Source?.Invoke(value!); member?.CheckSpecifiedSource?.Invoke(true); } return value; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private XmlSerializationReadCallback CreateXmlSerializationReadCallback(TypeMapping mapping) { if (mapping is StructMapping structMapping) { [RequiresUnreferencedCode("calls WriteStructMethod")] object? WriteStruct() => WriteStructMethod(structMapping, mapping.TypeDesc!.IsNullable, true, defaultNamespace: null); return WriteStruct; } else if (mapping is EnumMapping enumMapping) { return () => WriteEnumMethodSoap(enumMapping); } else if (mapping is NullableMapping nullableMapping) { [RequiresUnreferencedCode("calls WriteNullableMethod")] object? Wrapper() => WriteNullableMethod(nullableMapping, false, null); return Wrapper; } return DummyReadArrayMethod; } private static void NoopAction(object? o) { } private object? 
DummyReadArrayMethod() { UnknownNode(null); return null; } private static Type GetMemberType(MemberInfo memberInfo) { Type memberType; if (memberInfo is FieldInfo fieldInfo) { memberType = fieldInfo.FieldType; } else if (memberInfo is PropertyInfo propertyInfo) { memberType = propertyInfo.PropertyType; } else { throw new InvalidOperationException(SR.XmlInternalError); } return memberType; } private static bool IsWildcard(SpecialMapping mapping) { if (mapping is SerializableMapping serializableMapping) return serializableMapping.IsAny; return mapping.TypeDesc!.CanBeElementValue; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteArray(ArrayMapping arrayMapping, bool readOnly, bool isNullable, string? defaultNamespace, int fixupIndex = -1, Fixup? fixup = null, Member? member = null) { object? o = null; if (arrayMapping.IsSoap) { object? rre; if (fixupIndex >= 0) { rre = ReadReferencingElement(arrayMapping.TypeName, arrayMapping.Namespace, out fixup!.Ids![fixupIndex]); } else { rre = ReadReferencedElement(arrayMapping.TypeName, arrayMapping.Namespace); } TypeDesc td = arrayMapping.TypeDesc!; if (rre != null) { if (td.IsEnumerable || td.IsCollection) { WriteAddCollectionFixup(member!.GetSource!, member.Source!, rre, td, readOnly); // member.Source has been set at this point. // Setting the source to no-op to avoid setting the // source again. member.Source = NoopAction; } else { if (member == null) { throw new InvalidOperationException(SR.XmlInternalError); } member.Source!(rre); } } o = rre; } else { if (!ReadNull()) { var memberMapping = new MemberMapping() { Elements = arrayMapping.Elements, TypeDesc = arrayMapping.TypeDesc, ReadOnly = readOnly }; Type collectionType = memberMapping.TypeDesc!.Type!; o = ReflectionCreateObject(memberMapping.TypeDesc.Type!); if (memberMapping.ChoiceIdentifier != null) { // https://github.com/dotnet/runtime/issues/1400: // To Support ArrayMapping Types Having ChoiceIdentifier throw new NotImplementedException("memberMapping.ChoiceIdentifier != null"); } var arrayMember = new Member(memberMapping); arrayMember.Collection = new CollectionMember(); arrayMember.ArraySource = (item) => { arrayMember.Collection.Add(item); }; if ((readOnly && o == null) || Reader.IsEmptyElement) { Reader.Skip(); } else { Reader.ReadStartElement(); Reader.MoveToContent(); while (Reader.NodeType != XmlNodeType.EndElement && Reader.NodeType != XmlNodeType.None) { WriteMemberElements(new Member[] { arrayMember }, UnknownNode, UnknownNode, null, null); Reader.MoveToContent(); } ReadEndElement(); } SetCollectionObjectWithCollectionMember(ref o, arrayMember.Collection, collectionType); } } return o; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object WritePrimitive(TypeMapping mapping, Func<object, string> readFunc, object funcState) { if (mapping is EnumMapping enumMapping) { return WriteEnumMethod(enumMapping, readFunc, funcState); } else if (mapping.TypeDesc == StringTypeDesc) { return readFunc(funcState); } else if (mapping.TypeDesc!.FormatterName == "String") { if (mapping.TypeDesc.CollapseWhitespace) { return CollapseWhitespace(readFunc(funcState)); } else { return readFunc(funcState); } } else { if (!mapping.TypeDesc.HasCustomFormatter) { string value = readFunc(funcState); object retObj = mapping.TypeDesc.FormatterName switch { "Boolean" => XmlConvert.ToBoolean(value), "Int32" => XmlConvert.ToInt32(value), "Int16" => XmlConvert.ToInt16(value), "Int64" => XmlConvert.ToInt64(value), "Single" => 
XmlConvert.ToSingle(value), "Double" => XmlConvert.ToDouble(value), "Decimal" => XmlConvert.ToDecimal(value), "Byte" => XmlConvert.ToByte(value), "SByte" => XmlConvert.ToSByte(value), "UInt16" => XmlConvert.ToUInt16(value), "UInt32" => XmlConvert.ToUInt32(value), "UInt64" => XmlConvert.ToUInt64(value), "Guid" => XmlConvert.ToGuid(value), "Char" => XmlConvert.ToChar(value), "TimeSpan" => XmlConvert.ToTimeSpan(value), "DateTimeOffset" => XmlConvert.ToDateTimeOffset(value), _ => throw new InvalidOperationException(SR.Format(SR.XmlInternalErrorDetails, $"unknown FormatterName: {mapping.TypeDesc.FormatterName}")), }; return retObj; } else { string methodName = $"To{mapping.TypeDesc.FormatterName}"; MethodInfo? method = typeof(XmlSerializationReader).GetMethod(methodName, BindingFlags.Static | BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic, new Type[] { typeof(string) }); if (method == null) { throw new InvalidOperationException(SR.Format(SR.XmlInternalErrorDetails, $"unknown FormatterName: {mapping.TypeDesc.FormatterName}")); } return method.Invoke(this, new object[] { readFunc(funcState) })!; } } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteStructMethod(StructMapping mapping, bool isNullable, bool checkType, string? defaultNamespace) { if (mapping.IsSoap) return WriteEncodedStructMethod(mapping); else return WriteLiteralStructMethod(mapping, isNullable, checkType, defaultNamespace); } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteNullableMethod(NullableMapping nullableMapping, bool checkType, string? defaultNamespace) { object? o = Activator.CreateInstance(nullableMapping.TypeDesc!.Type!); if (!ReadNull()) { ElementAccessor element = new ElementAccessor(); element.Mapping = nullableMapping.BaseMapping; element.Any = false; element.IsNullable = nullableMapping.BaseMapping!.TypeDesc!.IsNullable; o = WriteElement(element, false, false, false, defaultNamespace); } return o; } private object WriteEnumMethod(EnumMapping mapping, Func<object, string> readFunc, object funcState) { Debug.Assert(!mapping.IsSoap, "mapping.IsSoap was true. Use WriteEnumMethodSoap for reading SOAP encoded enum value."); string source = readFunc(funcState); return WriteEnumMethod(mapping, source); } private object WriteEnumMethodSoap(EnumMapping mapping) { string source = Reader.ReadElementString(); return WriteEnumMethod(mapping, source); } private object WriteEnumMethod(EnumMapping mapping, string source) { if (mapping.IsFlags) { Hashtable table = WriteHashtable(mapping, mapping.TypeDesc!.Name); return Enum.ToObject(mapping.TypeDesc.Type!, ToEnum(source, table, mapping.TypeDesc.Name)); } else { foreach (ConstantMapping c in mapping.Constants!) { if (string.Equals(c.XmlName, source)) { return Enum.Parse(mapping.TypeDesc!.Type!, c.Name); } } throw CreateUnknownConstantException(source, mapping.TypeDesc!.Type!); } } private Hashtable WriteHashtable(EnumMapping mapping, string name) { var h = new Hashtable(); ConstantMapping[] constants = mapping.Constants!; for (int i = 0; i < constants.Length; i++) { h.Add(constants[i].XmlName, constants[i].Value); } return h; } private object? ReflectionCreateObject( [DynamicallyAccessedMembers(TrimmerConstants.AllMethods)] Type type) { object? obj; if (type.IsArray) { obj = Activator.CreateInstance(type, 32); } else { ConstructorInfo? 
ci = GetDefaultConstructor(type); if (ci != null) { obj = ci.Invoke(Array.Empty<object>()); } else { obj = Activator.CreateInstance(type); } } return obj; } private ConstructorInfo? GetDefaultConstructor( [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.NonPublicConstructors)] Type type) => type.IsValueType ? null : type.GetConstructor(BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.Instance | BindingFlags.DeclaredOnly, null, Type.EmptyTypes, null); [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteEncodedStructMethod(StructMapping structMapping) { if (structMapping.TypeDesc!.IsRoot) return null; Member[]? members = null; if (structMapping.TypeDesc.IsAbstract) { throw CreateAbstractTypeException(structMapping.TypeName!, structMapping.Namespace); } else { object? o = ReflectionCreateObject(structMapping.TypeDesc.Type!); MemberMapping[] mappings = TypeScope.GetSettableMembers(structMapping); members = new Member[mappings.Length]; for (int i = 0; i < mappings.Length; i++) { MemberMapping mapping = mappings[i]; var member = new Member(mapping); TypeDesc td = member.Mapping.TypeDesc!; if (td.IsCollection || td.IsEnumerable) { member.Source = Wrapper; [RequiresUnreferencedCode("Calls WriteAddCollectionFixup")] void Wrapper(object? value) { WriteAddCollectionFixup(o!, member, value!); } } else if (!member.Mapping.ReadOnly) { var setterDelegate = GetSetMemberValueDelegate(o!, member.Mapping.MemberInfo!.Name); member.Source = (value) => setterDelegate(o, value); } else { member.Source = NoopAction; } members[i] = member; } Fixup? fixup = WriteMemberFixupBegin(members, o); UnknownNodeAction unknownNodeAction = (_) => UnknownNode(o); WriteAttributes(members, null, unknownNodeAction, ref o); Reader.MoveToElement(); if (Reader.IsEmptyElement) { Reader.Skip(); return o; } Reader.ReadStartElement(); Reader.MoveToContent(); while (Reader.NodeType != XmlNodeType.EndElement && Reader.NodeType != XmlNodeType.None) { WriteMemberElements(members, UnknownNode, UnknownNode, null, null, fixup: fixup); Reader.MoveToContent(); } ReadEndElement(); return o; } } private Fixup? WriteMemberFixupBegin(Member[] members, object? o) { int fixupCount = 0; foreach (Member member in members) { if (member.Mapping.Elements!.Length == 0) continue; TypeMapping? mapping = member.Mapping.Elements[0].Mapping; if (mapping is StructMapping || mapping is ArrayMapping || mapping is PrimitiveMapping || mapping is NullableMapping) { member.MultiRef = true; member.FixupIndex = fixupCount++; } } Fixup? 
fixup; if (fixupCount > 0) { fixup = new Fixup(o, CreateWriteFixupMethod(members), fixupCount); AddFixup(fixup); } else { fixup = null; } return fixup; } private XmlSerializationFixupCallback CreateWriteFixupMethod(Member[] members) { return (fixupObject) => { var fixup = (Fixup)fixupObject; string[] ids = fixup.Ids!; foreach (Member member in members) { if (member.MultiRef) { int fixupIndex = member.FixupIndex; if (ids[fixupIndex] != null) { var memberValue = GetTarget(ids[fixupIndex]); member.Source!(memberValue); } } } }; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteAddCollectionFixup(object o, Member member, object memberValue) { TypeDesc typeDesc = member.Mapping.TypeDesc!; bool readOnly = member.Mapping.ReadOnly; Func<object?> getSource = () => GetMemberValue(o, member.Mapping.MemberInfo!); var setterDelegate = GetSetMemberValueDelegate(o, member.Mapping.MemberInfo!.Name); Action<object?> setSource = (value) => setterDelegate(o, value); WriteAddCollectionFixup(getSource, setSource, memberValue, typeDesc, readOnly); } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteAddCollectionFixup(Func<object?> getSource, Action<object?> setSource, object memberValue, TypeDesc typeDesc, bool readOnly) { object? memberSource = getSource(); if (memberSource == null) { if (readOnly) { throw CreateReadOnlyCollectionException(typeDesc.CSharpName); } memberSource = ReflectionCreateObject(typeDesc.Type!); setSource(memberSource); } var collectionFixup = new CollectionFixup( memberSource, new XmlSerializationCollectionFixupCallback(GetCreateCollectionOfObjectsCallback(typeDesc.Type!)), memberValue); AddFixup(collectionFixup); return memberSource; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private XmlSerializationCollectionFixupCallback GetCreateCollectionOfObjectsCallback(Type collectionType) { return Wrapper; [RequiresUnreferencedCode("Calls AddObjectsIntoTargetCollection")] void Wrapper(object? collection, object? collectionItems) { if (collectionItems == null) return; if (collection == null) return; var listOfItems = new List<object?>(); if (collectionItems is IEnumerable enumerableItems) { foreach (var item in enumerableItems) { listOfItems.Add(item); } } else { throw new InvalidOperationException(SR.XmlInternalError); } AddObjectsIntoTargetCollection(collection, listOfItems, collectionType); } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private object? WriteLiteralStructMethod(StructMapping structMapping, bool isNullable, bool checkType, string? defaultNamespace) { XmlQualifiedName? xsiType = checkType ? GetXsiType() : null; bool isNull = false; if (isNullable) { isNull = ReadNull(); } if (checkType) { if (structMapping.TypeDesc!.IsRoot && isNull) { if (xsiType != null) { return ReadTypedNull(xsiType); } else { if (structMapping.TypeDesc.IsValueType) { return ReflectionCreateObject(structMapping.TypeDesc.Type!); } else { return null; } } } object? 
o = null; if (xsiType == null || (!structMapping.TypeDesc.IsRoot && QNameEqual(xsiType, structMapping.TypeName, structMapping.Namespace, defaultNamespace))) { if (structMapping.TypeDesc.IsRoot) { return ReadTypedPrimitive(new XmlQualifiedName(Soap.UrType, XmlReservedNs.NsXs)); } } else if (WriteDerivedTypes(out o, structMapping, xsiType, defaultNamespace, checkType, isNullable)) { return o; } else if (structMapping.TypeDesc.IsRoot && WriteEnumAndArrayTypes(out o, structMapping, xsiType, defaultNamespace)) { return o; } else { if (structMapping.TypeDesc.IsRoot) return ReadTypedPrimitive(xsiType); else throw CreateUnknownTypeException(xsiType); } } if (structMapping.TypeDesc!.IsNullable && isNull) { return null; } else if (structMapping.TypeDesc.IsAbstract) { throw CreateAbstractTypeException(structMapping.TypeName!, structMapping.Namespace); } else { if (structMapping.TypeDesc.Type != null && typeof(XmlSchemaObject).IsAssignableFrom(structMapping.TypeDesc.Type)) { // https://github.com/dotnet/runtime/issues/1399: // To Support Serializing XmlSchemaObject throw new NotImplementedException(nameof(XmlSchemaObject)); } object? o = ReflectionCreateObject(structMapping.TypeDesc.Type!)!; MemberMapping[] mappings = TypeScope.GetSettableMembers(structMapping); MemberMapping? anyText = null; MemberMapping? anyElement = null; Member? anyAttribute = null; Member? anyElementMember = null; Member? anyTextMember = null; bool isSequence = structMapping.HasExplicitSequence(); if (isSequence) { // https://github.com/dotnet/runtime/issues/1402: // Currently the reflection based method treat this kind of type as normal types. // But potentially we can do some optimization for types that have ordered properties. } var allMembersList = new List<Member>(mappings.Length); var allMemberMappingList = new List<MemberMapping>(mappings.Length); for (int i = 0; i < mappings.Length; i++) { MemberMapping mapping = mappings[i]; var member = new Member(mapping); if (mapping.Text != null) { anyText = mapping; } if (mapping.Attribute != null) { member.Source = Wrapper; [RequiresUnreferencedCode("calls SetOrAddValueToMember")] void Wrapper(object? value) { SetOrAddValueToMember(o!, value!, member.Mapping.MemberInfo!); } if (mapping.Attribute.Any) { anyAttribute = member; } } if (!isSequence) { // find anyElement if present. for (int j = 0; j < mapping.Elements!.Length; j++) { if (mapping.Elements[j].Any && (mapping.Elements[j].Name == null || mapping.Elements[j].Name.Length == 0)) { anyElement = mapping; break; } } } else if (mapping.IsParticle && !mapping.IsSequence) { structMapping.FindDeclaringMapping(mapping, out StructMapping? 
declaringMapping, structMapping.TypeName!); throw new InvalidOperationException(SR.Format(SR.XmlSequenceHierarchy, structMapping.TypeDesc.FullName, mapping.Name, declaringMapping!.TypeDesc!.FullName, "Order")); } if (mapping.TypeDesc!.IsArrayLike) { if (member.Source == null && mapping.TypeDesc.IsArrayLike && !(mapping.Elements!.Length == 1 && mapping.Elements[0].Mapping is ArrayMapping)) { member.Source = (item) => { if (member.Collection == null) { member.Collection = new CollectionMember(); } member.Collection.Add(item); }; member.ArraySource = member.Source; } else if (!mapping.TypeDesc.IsArray) { } } if (member.Source == null) { var pi = member.Mapping.MemberInfo as PropertyInfo; if (pi != null && typeof(IList).IsAssignableFrom(pi.PropertyType) && (pi.SetMethod == null || !pi.SetMethod.IsPublic)) { member.Source = (value) => { var getOnlyList = (IList)pi.GetValue(o)!; if (value is IList valueList) { foreach (var v in valueList) { getOnlyList.Add(v); } } else { getOnlyList.Add(value); } }; } else { if (member.Mapping.Xmlns != null) { var xmlSerializerNamespaces = new XmlSerializerNamespaces(); var setMemberValue = GetSetMemberValueDelegate(o!, member.Mapping.Name); setMemberValue(o, xmlSerializerNamespaces); member.XmlnsSource = (ns, name) => { xmlSerializerNamespaces.Add(ns, name); }; } else { var setterDelegate = GetSetMemberValueDelegate(o!, member.Mapping.Name); member.Source = (value) => setterDelegate(o, value); } } } if (member.Mapping.CheckSpecified == SpecifiedAccessor.ReadWrite) { member.CheckSpecifiedSource = Wrapper; [RequiresUnreferencedCode("calls GetType on object")] void Wrapper(object? _) { string specifiedMemberName = $"{member.Mapping.Name}Specified"; MethodInfo? specifiedMethodInfo = o!.GetType().GetMethod($"set_{specifiedMemberName}"); if (specifiedMethodInfo != null) { specifiedMethodInfo.Invoke(o, new object[] { true }); } } } ChoiceIdentifierAccessor? choice = mapping.ChoiceIdentifier; if (choice != null && o != null) { member.ChoiceSource = Wrapper; [RequiresUnreferencedCode("Calls SetOrAddValueToMember")] void Wrapper(object elementNameObject) { string? elementName = elementNameObject as string; foreach (var name in choice.MemberIds!) { if (name == elementName) { object choiceValue = Enum.Parse(choice.Mapping!.TypeDesc!.Type!, name); SetOrAddValueToMember(o, choiceValue, choice.MemberInfo!); break; } } } } allMemberMappingList.Add(mapping); allMembersList.Add(member); if (mapping == anyElement) { anyElementMember = member; } else if (mapping == anyText) { anyTextMember = member; } } Member[] allMembers = allMembersList.ToArray(); UnknownNodeAction unknownNodeAction = (_) => UnknownNode(o); WriteAttributes(allMembers, anyAttribute, unknownNodeAction, ref o); Reader.MoveToElement(); if (Reader.IsEmptyElement) { Reader.Skip(); return o; } Reader.ReadStartElement(); bool IsSequenceAllMembers = IsSequence(allMembers); if (IsSequenceAllMembers) { // https://github.com/dotnet/runtime/issues/1402: // Currently the reflection based method treat this kind of type as normal types. // But potentially we can do some optimization for types that have ordered properties. } WriteMembers(ref o, allMembers, unknownNodeAction, unknownNodeAction, anyElementMember, anyTextMember); foreach (Member member in allMembers) { if (member.Collection != null) { MemberInfo[] memberInfos = o!.GetType().GetMember(member.Mapping.Name); MemberInfo memberInfo = memberInfos[0]; object? 
collection = null; SetCollectionObjectWithCollectionMember(ref collection, member.Collection, member.Mapping.TypeDesc!.Type!); var setMemberValue = GetSetMemberValueDelegate(o, memberInfo.Name); setMemberValue(o, collection); } } ReadEndElement(); return o; } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private bool WriteEnumAndArrayTypes(out object? o, StructMapping mapping, XmlQualifiedName xsiType, string? defaultNamespace) { foreach (var m in _mapping.Scope!.TypeMappings) { if (m is EnumMapping enumMapping) { if (QNameEqual(xsiType, enumMapping.TypeName, enumMapping.Namespace, defaultNamespace)) { Reader.ReadStartElement(); Func<object, string> functor = (state) => { var reader = (ReflectionXmlSerializationReader)state; return reader.CollapseWhitespace(reader.Reader.ReadString()); }; o = WriteEnumMethod(enumMapping, functor, this); ReadEndElement(); return true; } continue; } if (m is ArrayMapping arrayMapping) { if (QNameEqual(xsiType, arrayMapping.TypeName, arrayMapping.Namespace, defaultNamespace)) { o = WriteArray(arrayMapping, false, false, defaultNamespace); return true; } continue; } } o = null; return false; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private bool WriteDerivedTypes(out object? o, StructMapping mapping, XmlQualifiedName xsiType, string? defaultNamespace, bool checkType, bool isNullable) { for (StructMapping? derived = mapping.DerivedMappings; derived != null; derived = derived.NextDerivedMapping) { if (QNameEqual(xsiType, derived.TypeName, derived.Namespace, defaultNamespace)) { o = WriteStructMethod(derived, isNullable, checkType, defaultNamespace); return true; } if (WriteDerivedTypes(out o, derived, xsiType, defaultNamespace, checkType, isNullable)) { return true; } } o = null; return false; } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteAttributes(Member[] members, Member? anyAttribute, UnknownNodeAction elseCall, ref object? o) { Member? xmlnsMember = null; var attributes = new List<AttributeAccessor>(); foreach (Member member in members) { if (member.Mapping.Xmlns != null) { xmlnsMember = member; break; } } while (Reader.MoveToNextAttribute()) { bool memberFound = false; foreach (Member member in members) { if (member.Mapping.Xmlns != null || member.Mapping.Ignore) { continue; } AttributeAccessor? attribute = member.Mapping.Attribute; if (attribute == null) continue; if (attribute.Any) continue; attributes.Add(attribute); if (attribute.IsSpecialXmlNamespace) { memberFound = XmlNodeEqual(Reader, attribute.Name, XmlReservedNs.NsXml); } else { memberFound = XmlNodeEqual(Reader, attribute.Name, attribute.Form == XmlSchemaForm.Qualified ? attribute.Namespace : string.Empty); } if (memberFound) { WriteAttribute(member); memberFound = true; break; } } if (memberFound) { continue; } bool flag2 = false; if (xmlnsMember != null) { if (IsXmlnsAttribute(Reader.Name)) { Debug.Assert(xmlnsMember.XmlnsSource != null, "Xmlns member's source was not set."); xmlnsMember.XmlnsSource(Reader.Name.Length == 5 ? string.Empty : Reader.LocalName, Reader.Value); } else { flag2 = true; } } else if (!IsXmlnsAttribute(Reader.Name)) { flag2 = true; } if (flag2) { if (anyAttribute != null) { var attr = (Document.ReadNode(Reader) as XmlAttribute)!; ParseWsdlArrayType(attr); WriteAttribute(anyAttribute, attr); } else { elseCall(o); } } } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void WriteAttribute(Member member, object? 
attr = null) { AttributeAccessor attribute = member.Mapping.Attribute!; object? value = null; if (attribute.Mapping is SpecialMapping special) { if (special.TypeDesc!.Kind == TypeKind.Attribute) { value = attr; } else if (special.TypeDesc.CanBeAttributeValue) { // https://github.com/dotnet/runtime/issues/1398: // To Support special.TypeDesc.CanBeAttributeValue == true throw new NotImplementedException("special.TypeDesc.CanBeAttributeValue"); } else throw new InvalidOperationException(SR.XmlInternalError); } else { if (attribute.IsList) { string listValues = Reader.Value; string[] vals = listValues.Split(null); Array arrayValue = Array.CreateInstance(member.Mapping.TypeDesc!.Type!.GetElementType()!, vals.Length); for (int i = 0; i < vals.Length; i++) { arrayValue.SetValue(WritePrimitive(attribute.Mapping!, (state) => ((string[])state)[i], vals), i); } value = arrayValue; } else { value = WritePrimitive(attribute.Mapping!, (state) => ((XmlReader)state).Value, Reader); } } member.Source!(value); if (member.Mapping.CheckSpecified == SpecifiedAccessor.ReadWrite) { member.CheckSpecifiedSource?.Invoke(null); } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void SetOrAddValueToMember(object o, object value, MemberInfo memberInfo) { Type memberType = GetMemberType(memberInfo); if (memberType == value.GetType()) { var setMemberValue = GetSetMemberValueDelegate(o, memberInfo.Name); setMemberValue(o, value); } else if (memberType.IsArray) { AddItemInArrayMember(o, memberInfo, memberType, value); } else { var setMemberValue = GetSetMemberValueDelegate(o, memberInfo.Name); setMemberValue(o, value); } } [RequiresUnreferencedCode(XmlSerializer.TrimSerializationWarning)] private void AddItemInArrayMember(object o, MemberInfo memberInfo, Type memberType, object item) { var currentArray = (Array?)GetMemberValue(o, memberInfo); int length; if (currentArray == null) { length = 0; } else { length = currentArray.Length; } var newArray = Array.CreateInstance(memberType.GetElementType()!, length + 1); if (currentArray != null) { Array.Copy(currentArray, newArray, length); } newArray.SetValue(item, length); var setMemberValue = GetSetMemberValueDelegate(o, memberInfo.Name); setMemberValue(o, newArray); } // WriteXmlNodeEqual private bool XmlNodeEqual(XmlReader source, string name, string? ns) { return source.LocalName == name && string.Equals(source.NamespaceURI, ns); } private bool QNameEqual(XmlQualifiedName xsiType, string? name, string? ns, string? defaultNamespace) { return xsiType.Name == name && string.Equals(xsiType.Namespace, defaultNamespace); } private void CreateUnknownNodeException(object? o) { CreateUnknownNodeException(); } internal sealed class CollectionMember : List<object?> { } internal sealed class Member { public MemberMapping Mapping; public CollectionMember? Collection; public int FixupIndex = -1; public bool MultiRef; public Action<object?>? Source; public Func<object?>? GetSource; public Action<object>? ArraySource; public Action<object?>? CheckSpecifiedSource; public Action<object>? ChoiceSource; public Action<string, string>? XmlnsSource; public Member(MemberMapping mapping) { Mapping = mapping; } } internal sealed class CheckTypeSource { public string? Id { get; set; } public bool IsObject { get; set; } public Type? Type { get; set; } public object? RefObject { get; set; } } internal sealed class ObjectHolder { public object? Object; } } internal static class ReflectionXmlSerializationReaderHelper { public delegate void SetMemberValueDelegate(object? 
o, object? val); public static SetMemberValueDelegate GetSetMemberValueDelegateWithType<TObj, TParam>(MemberInfo memberInfo) { if (typeof(TObj).IsValueType) { if (memberInfo is PropertyInfo propInfo) { return delegate (object? o, object? p) { propInfo.SetValue(o, p); }; } else if (memberInfo is FieldInfo fieldInfo) { return delegate (object? o, object? p) { fieldInfo.SetValue(o, p); }; } throw new InvalidOperationException(SR.XmlInternalError); } else { Action<TObj, TParam>? setTypedDelegate = null; if (memberInfo is PropertyInfo propInfo) { var setMethod = propInfo.GetSetMethod(true); if (setMethod == null) { return delegate (object? o, object? p) { // Maintain the same failure behavior as non-cached delegate propInfo.SetValue(o, p); }; } setTypedDelegate = (Action<TObj, TParam>)setMethod.CreateDelegate(typeof(Action<TObj, TParam>)); } else if (memberInfo is FieldInfo fieldInfo) { var objectParam = Expression.Parameter(typeof(TObj)); var valueParam = Expression.Parameter(typeof(TParam)); var fieldExpr = Expression.Field(objectParam, fieldInfo); var assignExpr = Expression.Assign(fieldExpr, valueParam); setTypedDelegate = Expression.Lambda<Action<TObj, TParam>>(assignExpr, objectParam, valueParam).Compile(); } return delegate (object? o, object? p) { setTypedDelegate!((TObj)o!, (TParam)p!); }; } } } }
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets. (An illustrative sketch of source vs. target placement follows this record.)
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
./src/tests/JIT/Regression/JitBlue/Runtime_54118/Runtime_54118.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>None</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).cs" /> </ItemGroup> </Project>
-1
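The record above describes placing OSR patchpoints at backedge targets versus backedge sources, with an adaptive choice between them. As a reading aid only, here is a minimal C# sketch (not part of the dataset and not JIT code) marking where those two candidate locations fall in an ordinary `for` loop; the comments paraphrase the PR description rather than the actual heuristic, and the class and method names are invented for illustration.

```csharp
// Illustrative only: a simple loop whose single backedge gives OSR two
// candidate patchpoint locations, per the PR description above.
using System;

public static class PatchpointPlacementSketch
{
    public static long SumToN(int n)
    {
        long sum = 0;

        // Loop head: the backedge *target*. The previous strategy placed
        // the OSR patchpoint here.
        for (int i = 0; i < n; i++)
        {
            sum += i;

            // Bottom of the loop body, just before the jump back to the
            // loop test: the backedge *source*, the alternative placement
            // this PR adds. With a single backedge like this one, an
            // adaptive policy can pick the source; per the description,
            // placement falls back to targets when sources are not usable.
        }

        return sum;
    }

    public static void Main()
    {
        // A long-running loop gives tiered compilation a reason to
        // transition this method via OSR while it is still executing.
        Console.WriteLine(SumToN(100_000_000));
    }
}
```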
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
./src/tests/JIT/HardwareIntrinsics/X86/Avx2/BlendVariable.UInt32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void BlendVariableUInt32() { var test = new SimpleTernaryOpTest__BlendVariableUInt32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); if (Avx.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (Avx.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (Avx.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (Avx.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (Avx.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__BlendVariableUInt32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(UInt32[] inArray1, UInt32[] inArray2, UInt32[] inArray3, UInt32[] outArray, int 
alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt32>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<UInt32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt32>(); if ((alignment != 32 && alignment != 16) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inArray3 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<UInt32, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector256<UInt32> _fld1; public Vector256<UInt32> _fld2; public Vector256<UInt32> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = (((i % 2) == 0) ? 
Convert.ToUInt32("0xFFFFFFFF", 16) : (uint)0); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref testStruct._fld3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__BlendVariableUInt32 testClass) { var result = Avx2.BlendVariable(_fld1, _fld2, _fld3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleTernaryOpTest__BlendVariableUInt32 testClass) { fixed (Vector256<UInt32>* pFld1 = &_fld1) fixed (Vector256<UInt32>* pFld2 = &_fld2) fixed (Vector256<UInt32>* pFld3 = &_fld3) { var result = Avx2.BlendVariable( Avx.LoadVector256((UInt32*)(pFld1)), Avx.LoadVector256((UInt32*)(pFld2)), Avx.LoadVector256((UInt32*)(pFld3)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32); private static UInt32[] _data1 = new UInt32[Op1ElementCount]; private static UInt32[] _data2 = new UInt32[Op2ElementCount]; private static UInt32[] _data3 = new UInt32[Op3ElementCount]; private static Vector256<UInt32> _clsVar1; private static Vector256<UInt32> _clsVar2; private static Vector256<UInt32> _clsVar3; private Vector256<UInt32> _fld1; private Vector256<UInt32> _fld2; private Vector256<UInt32> _fld3; private DataTable _dataTable; static SimpleTernaryOpTest__BlendVariableUInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _clsVar2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = (((i % 2) == 0) ? Convert.ToUInt32("0xFFFFFFFF", 16) : (uint)0); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _clsVar3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); } public SimpleTernaryOpTest__BlendVariableUInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = (((i % 2) == 0) ? 
Convert.ToUInt32("0xFFFFFFFF", 16) : (uint)0); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _fld3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = (((i % 2) == 0) ? Convert.ToUInt32("0xFFFFFFFF", 16) : (uint)0); } _dataTable = new DataTable(_data1, _data2, _data3, new UInt32[RetElementCount], LargestVectorSize); } public bool IsSupported => Avx2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Avx2.BlendVariable( Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray3Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Avx2.BlendVariable( Avx.LoadVector256((UInt32*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((UInt32*)(_dataTable.inArray2Ptr)), Avx.LoadVector256((UInt32*)(_dataTable.inArray3Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Avx2.BlendVariable( Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray2Ptr)), Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray3Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Avx2).GetMethod(nameof(Avx2.BlendVariable), new Type[] { typeof(Vector256<UInt32>), typeof(Vector256<UInt32>), typeof(Vector256<UInt32>) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray3Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<UInt32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Avx2).GetMethod(nameof(Avx2.BlendVariable), new Type[] { typeof(Vector256<UInt32>), typeof(Vector256<UInt32>), typeof(Vector256<UInt32>) }) .Invoke(null, new object[] { Avx.LoadVector256((UInt32*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((UInt32*)(_dataTable.inArray2Ptr)), Avx.LoadVector256((UInt32*)(_dataTable.inArray3Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<UInt32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Avx2).GetMethod(nameof(Avx2.BlendVariable), new Type[] { typeof(Vector256<UInt32>), typeof(Vector256<UInt32>), typeof(Vector256<UInt32>) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray2Ptr)), Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray3Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<UInt32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Avx2.BlendVariable( _clsVar1, _clsVar2, _clsVar3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector256<UInt32>* pClsVar1 = &_clsVar1) fixed (Vector256<UInt32>* pClsVar2 = &_clsVar2) fixed (Vector256<UInt32>* pClsVar3 = &_clsVar3) { var result = Avx2.BlendVariable( Avx.LoadVector256((UInt32*)(pClsVar1)), Avx.LoadVector256((UInt32*)(pClsVar2)), Avx.LoadVector256((UInt32*)(pClsVar3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray2Ptr); var op3 = Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray3Ptr); var result = Avx2.BlendVariable(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = Avx.LoadVector256((UInt32*)(_dataTable.inArray1Ptr)); var op2 = Avx.LoadVector256((UInt32*)(_dataTable.inArray2Ptr)); var op3 = Avx.LoadVector256((UInt32*)(_dataTable.inArray3Ptr)); var result = Avx2.BlendVariable(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var op1 = Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray1Ptr)); var op2 = Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray2Ptr)); var op3 = Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray3Ptr)); var result = Avx2.BlendVariable(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__BlendVariableUInt32(); var result = Avx2.BlendVariable(test._fld1, test._fld2, test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleTernaryOpTest__BlendVariableUInt32(); fixed (Vector256<UInt32>* pFld1 = &test._fld1) fixed (Vector256<UInt32>* pFld2 = &test._fld2) fixed (Vector256<UInt32>* pFld3 = 
&test._fld3) { var result = Avx2.BlendVariable( Avx.LoadVector256((UInt32*)(pFld1)), Avx.LoadVector256((UInt32*)(pFld2)), Avx.LoadVector256((UInt32*)(pFld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Avx2.BlendVariable(_fld1, _fld2, _fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector256<UInt32>* pFld1 = &_fld1) fixed (Vector256<UInt32>* pFld2 = &_fld2) fixed (Vector256<UInt32>* pFld3 = &_fld3) { var result = Avx2.BlendVariable( Avx.LoadVector256((UInt32*)(pFld1)), Avx.LoadVector256((UInt32*)(pFld2)), Avx.LoadVector256((UInt32*)(pFld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Avx2.BlendVariable(test._fld1, test._fld2, test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = Avx2.BlendVariable( Avx.LoadVector256((UInt32*)(&test._fld1)), Avx.LoadVector256((UInt32*)(&test._fld2)), Avx.LoadVector256((UInt32*)(&test._fld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector256<UInt32> op1, Vector256<UInt32> op2, Vector256<UInt32> op3, void* result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; UInt32[] inArray3 = new UInt32[Op3ElementCount]; UInt32[] outArray = new UInt32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), op2); Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; UInt32[] inArray3 = new UInt32[Op3ElementCount]; UInt32[] outArray = new 
UInt32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(UInt32[] firstOp, UInt32[] secondOp, UInt32[] thirdOp, UInt32[] result, [CallerMemberName] string method = "") { bool succeeded = true; if ((thirdOp[0] != 0) ? secondOp[0] != result[0] : firstOp[0] != result[0]) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if ((thirdOp[i] != 0) ? secondOp[i] != result[i] : firstOp[i] != result[i]) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Avx2)}.{nameof(Avx2.BlendVariable)}<UInt32>(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void BlendVariableUInt32() { var test = new SimpleTernaryOpTest__BlendVariableUInt32(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); if (Avx.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (Avx.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (Avx.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (Avx.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (Avx.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__BlendVariableUInt32 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(UInt32[] inArray1, UInt32[] inArray2, UInt32[] inArray3, UInt32[] outArray, int 
alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt32>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<UInt32>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt32>(); if ((alignment != 32 && alignment != 16) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inArray3 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt32, byte>(ref inArray2[0]), (uint)sizeOfinArray2); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<UInt32, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector256<UInt32> _fld1; public Vector256<UInt32> _fld2; public Vector256<UInt32> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = (((i % 2) == 0) ? 
Convert.ToUInt32("0xFFFFFFFF", 16) : (uint)0); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref testStruct._fld3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__BlendVariableUInt32 testClass) { var result = Avx2.BlendVariable(_fld1, _fld2, _fld3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleTernaryOpTest__BlendVariableUInt32 testClass) { fixed (Vector256<UInt32>* pFld1 = &_fld1) fixed (Vector256<UInt32>* pFld2 = &_fld2) fixed (Vector256<UInt32>* pFld3 = &_fld3) { var result = Avx2.BlendVariable( Avx.LoadVector256((UInt32*)(pFld1)), Avx.LoadVector256((UInt32*)(pFld2)), Avx.LoadVector256((UInt32*)(pFld3)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<UInt32>>() / sizeof(UInt32); private static UInt32[] _data1 = new UInt32[Op1ElementCount]; private static UInt32[] _data2 = new UInt32[Op2ElementCount]; private static UInt32[] _data3 = new UInt32[Op3ElementCount]; private static Vector256<UInt32> _clsVar1; private static Vector256<UInt32> _clsVar2; private static Vector256<UInt32> _clsVar3; private Vector256<UInt32> _fld1; private Vector256<UInt32> _fld2; private Vector256<UInt32> _fld3; private DataTable _dataTable; static SimpleTernaryOpTest__BlendVariableUInt32() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _clsVar2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = (((i % 2) == 0) ? Convert.ToUInt32("0xFFFFFFFF", 16) : (uint)0); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _clsVar3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); } public SimpleTernaryOpTest__BlendVariableUInt32() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _fld2), ref Unsafe.As<UInt32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = (((i % 2) == 0) ? 
Convert.ToUInt32("0xFFFFFFFF", 16) : (uint)0); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<UInt32>, byte>(ref _fld3), ref Unsafe.As<UInt32, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt32(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = (((i % 2) == 0) ? Convert.ToUInt32("0xFFFFFFFF", 16) : (uint)0); } _dataTable = new DataTable(_data1, _data2, _data3, new UInt32[RetElementCount], LargestVectorSize); } public bool IsSupported => Avx2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Avx2.BlendVariable( Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray3Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Avx2.BlendVariable( Avx.LoadVector256((UInt32*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((UInt32*)(_dataTable.inArray2Ptr)), Avx.LoadVector256((UInt32*)(_dataTable.inArray3Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Avx2.BlendVariable( Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray2Ptr)), Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray3Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Avx2).GetMethod(nameof(Avx2.BlendVariable), new Type[] { typeof(Vector256<UInt32>), typeof(Vector256<UInt32>), typeof(Vector256<UInt32>) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray3Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<UInt32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Avx2).GetMethod(nameof(Avx2.BlendVariable), new Type[] { typeof(Vector256<UInt32>), typeof(Vector256<UInt32>), typeof(Vector256<UInt32>) }) .Invoke(null, new object[] { Avx.LoadVector256((UInt32*)(_dataTable.inArray1Ptr)), Avx.LoadVector256((UInt32*)(_dataTable.inArray2Ptr)), Avx.LoadVector256((UInt32*)(_dataTable.inArray3Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<UInt32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Avx2).GetMethod(nameof(Avx2.BlendVariable), new Type[] { typeof(Vector256<UInt32>), typeof(Vector256<UInt32>), typeof(Vector256<UInt32>) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray1Ptr)), Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray2Ptr)), Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray3Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<UInt32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Avx2.BlendVariable( _clsVar1, _clsVar2, _clsVar3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector256<UInt32>* pClsVar1 = &_clsVar1) fixed (Vector256<UInt32>* pClsVar2 = &_clsVar2) fixed (Vector256<UInt32>* pClsVar3 = &_clsVar3) { var result = Avx2.BlendVariable( Avx.LoadVector256((UInt32*)(pClsVar1)), Avx.LoadVector256((UInt32*)(pClsVar2)), Avx.LoadVector256((UInt32*)(pClsVar3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray2Ptr); var op3 = Unsafe.Read<Vector256<UInt32>>(_dataTable.inArray3Ptr); var result = Avx2.BlendVariable(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = Avx.LoadVector256((UInt32*)(_dataTable.inArray1Ptr)); var op2 = Avx.LoadVector256((UInt32*)(_dataTable.inArray2Ptr)); var op3 = Avx.LoadVector256((UInt32*)(_dataTable.inArray3Ptr)); var result = Avx2.BlendVariable(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var op1 = Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray1Ptr)); var op2 = Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray2Ptr)); var op3 = Avx.LoadAlignedVector256((UInt32*)(_dataTable.inArray3Ptr)); var result = Avx2.BlendVariable(op1, op2, op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__BlendVariableUInt32(); var result = Avx2.BlendVariable(test._fld1, test._fld2, test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleTernaryOpTest__BlendVariableUInt32(); fixed (Vector256<UInt32>* pFld1 = &test._fld1) fixed (Vector256<UInt32>* pFld2 = &test._fld2) fixed (Vector256<UInt32>* pFld3 = 
&test._fld3) { var result = Avx2.BlendVariable( Avx.LoadVector256((UInt32*)(pFld1)), Avx.LoadVector256((UInt32*)(pFld2)), Avx.LoadVector256((UInt32*)(pFld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Avx2.BlendVariable(_fld1, _fld2, _fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector256<UInt32>* pFld1 = &_fld1) fixed (Vector256<UInt32>* pFld2 = &_fld2) fixed (Vector256<UInt32>* pFld3 = &_fld3) { var result = Avx2.BlendVariable( Avx.LoadVector256((UInt32*)(pFld1)), Avx.LoadVector256((UInt32*)(pFld2)), Avx.LoadVector256((UInt32*)(pFld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Avx2.BlendVariable(test._fld1, test._fld2, test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = Avx2.BlendVariable( Avx.LoadVector256((UInt32*)(&test._fld1)), Avx.LoadVector256((UInt32*)(&test._fld2)), Avx.LoadVector256((UInt32*)(&test._fld3)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector256<UInt32> op1, Vector256<UInt32> op2, Vector256<UInt32> op3, void* result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; UInt32[] inArray3 = new UInt32[Op3ElementCount]; UInt32[] outArray = new UInt32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), op2); Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "") { UInt32[] inArray1 = new UInt32[Op1ElementCount]; UInt32[] inArray2 = new UInt32[Op2ElementCount]; UInt32[] inArray3 = new UInt32[Op3ElementCount]; UInt32[] outArray = new 
UInt32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<UInt32>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(UInt32[] firstOp, UInt32[] secondOp, UInt32[] thirdOp, UInt32[] result, [CallerMemberName] string method = "") { bool succeeded = true; if ((thirdOp[0] != 0) ? secondOp[0] != result[0] : firstOp[0] != result[0]) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if ((thirdOp[i] != 0) ? secondOp[i] != result[i] : firstOp[i] != result[i]) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Avx2)}.{nameof(Avx2.BlendVariable)}<UInt32>(Vector256<UInt32>, Vector256<UInt32>, Vector256<UInt32>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
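The BlendVariable record above validates each element with `(thirdOp[i] != 0) ? secondOp[i] : firstOp[i]`, which works because the test constrains every mask element to all-ones or zero, so whole-element selection is equivalent to what the hardware blend does. A minimal scalar sketch of that check (the helper name `BlendScalar` and the sample values below are made up for illustration; this is not the test's own code):

```csharp
using System;

public static class BlendVariableScalarSketch
{
    // Scalar model of the check in ValidateResult: where the mask element is
    // non-zero, the result comes from 'right'; otherwise it comes from 'left'.
    public static uint[] BlendScalar(uint[] left, uint[] right, uint[] mask)
    {
        var result = new uint[left.Length];
        for (int i = 0; i < left.Length; i++)
        {
            result[i] = (mask[i] != 0) ? right[i] : left[i];
        }
        return result;
    }

    public static void Main()
    {
        uint[] left  = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint[] right = { 10, 20, 30, 40, 50, 60, 70, 80 };
        uint[] mask  = { 0xFFFFFFFF, 0, 0xFFFFFFFF, 0, 0xFFFFFFFF, 0, 0xFFFFFFFF, 0 };

        // Prints: 10, 2, 30, 4, 50, 6, 70, 8
        Console.WriteLine(string.Join(", ", BlendScalar(left, right, mask)));
    }
}
```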
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/pal/src/libunwind/include/libunwind-coredump.h
/* libunwind - a platform-independent unwind library This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef libunwind_coredump_h #define libunwind_coredump_h #include <libunwind.h> #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif /* Helper routines which make it easy to use libunwind on a coredump. They're available only if UNW_REMOTE_ONLY is _not_ defined and they aren't really part of the libunwind API. They are implemented in a archive library called libunwind-coredump.a. */ struct UCD_info; extern struct UCD_info *_UCD_create(const char *filename); extern void _UCD_destroy(struct UCD_info *); extern int _UCD_get_num_threads(struct UCD_info *); extern void _UCD_select_thread(struct UCD_info *, int); extern pid_t _UCD_get_pid(struct UCD_info *); extern int _UCD_get_cursig(struct UCD_info *); extern int _UCD_add_backing_file_at_segment(struct UCD_info *, int phdr_no, const char *filename); extern int _UCD_add_backing_file_at_vaddr(struct UCD_info *, unsigned long vaddr, const char *filename); extern int _UCD_find_proc_info (unw_addr_space_t, unw_word_t, unw_proc_info_t *, int, void *); extern void _UCD_put_unwind_info (unw_addr_space_t, unw_proc_info_t *, void *); extern int _UCD_get_dyn_info_list_addr (unw_addr_space_t, unw_word_t *, void *); extern int _UCD_access_mem (unw_addr_space_t, unw_word_t, unw_word_t *, int, void *); extern int _UCD_access_reg (unw_addr_space_t, unw_regnum_t, unw_word_t *, int, void *); extern int _UCD_access_fpreg (unw_addr_space_t, unw_regnum_t, unw_fpreg_t *, int, void *); extern int _UCD_get_proc_name (unw_addr_space_t, unw_word_t, char *, size_t, unw_word_t *, void *); extern int _UCD_resume (unw_addr_space_t, unw_cursor_t *, void *); extern unw_accessors_t _UCD_accessors; #if defined(__cplusplus) || defined(c_plusplus) } #endif #endif /* libunwind_coredump_h */
/* libunwind - a platform-independent unwind library This file is part of libunwind. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef libunwind_coredump_h #define libunwind_coredump_h #include <libunwind.h> #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif /* Helper routines which make it easy to use libunwind on a coredump. They're available only if UNW_REMOTE_ONLY is _not_ defined and they aren't really part of the libunwind API. They are implemented in a archive library called libunwind-coredump.a. */ struct UCD_info; extern struct UCD_info *_UCD_create(const char *filename); extern void _UCD_destroy(struct UCD_info *); extern int _UCD_get_num_threads(struct UCD_info *); extern void _UCD_select_thread(struct UCD_info *, int); extern pid_t _UCD_get_pid(struct UCD_info *); extern int _UCD_get_cursig(struct UCD_info *); extern int _UCD_add_backing_file_at_segment(struct UCD_info *, int phdr_no, const char *filename); extern int _UCD_add_backing_file_at_vaddr(struct UCD_info *, unsigned long vaddr, const char *filename); extern int _UCD_find_proc_info (unw_addr_space_t, unw_word_t, unw_proc_info_t *, int, void *); extern void _UCD_put_unwind_info (unw_addr_space_t, unw_proc_info_t *, void *); extern int _UCD_get_dyn_info_list_addr (unw_addr_space_t, unw_word_t *, void *); extern int _UCD_access_mem (unw_addr_space_t, unw_word_t, unw_word_t *, int, void *); extern int _UCD_access_reg (unw_addr_space_t, unw_regnum_t, unw_word_t *, int, void *); extern int _UCD_access_fpreg (unw_addr_space_t, unw_regnum_t, unw_fpreg_t *, int, void *); extern int _UCD_get_proc_name (unw_addr_space_t, unw_word_t, char *, size_t, unw_word_t *, void *); extern int _UCD_resume (unw_addr_space_t, unw_cursor_t *, void *); extern unw_accessors_t _UCD_accessors; #if defined(__cplusplus) || defined(c_plusplus) } #endif #endif /* libunwind_coredump_h */
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b31283/b31283.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/libraries/System.Private.CoreLib/src/System/Reflection/AssemblyCopyrightAttribute.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Reflection { [AttributeUsage(AttributeTargets.Assembly, Inherited = false)] public sealed class AssemblyCopyrightAttribute : Attribute { public AssemblyCopyrightAttribute(string copyright) { Copyright = copyright; } public string Copyright { get; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Reflection { [AttributeUsage(AttributeTargets.Assembly, Inherited = false)] public sealed class AssemblyCopyrightAttribute : Attribute { public AssemblyCopyrightAttribute(string copyright) { Copyright = copyright; } public string Copyright { get; } } }
-1
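As a quick illustration of the attribute defined in the record above, here is a hedged usage sketch: the copyright string is a placeholder, and in SDK-style projects the attribute is normally emitted from the `Copyright` MSBuild property rather than written by hand, so this assumes no auto-generated copy of the attribute is also present.

```csharp
using System;
using System.Reflection;

// Hypothetical assembly-level usage of AssemblyCopyrightAttribute.
[assembly: AssemblyCopyright("Copyright (c) Example")]

internal static class CopyrightDemo
{
    private static void Main()
    {
        // Read the attribute back from the current assembly via reflection.
        var attribute = Assembly.GetExecutingAssembly()
                                .GetCustomAttribute<AssemblyCopyrightAttribute>();

        Console.WriteLine(attribute?.Copyright ?? "<none>");
    }
}
```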
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/tests/JIT/Methodical/VT/callconv/jumper2_il_r.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="jumper2.il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="jumper2.il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/tests/Interop/PInvoke/CriticalHandles/CriticalHandlesNative.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdio.h> #include <xplatform.h> typedef BOOL(__stdcall *HandleCallback)(void*); extern "C" DLL_EXPORT size_t __stdcall In(void* handle, HandleCallback handleCallback) { if (handleCallback != nullptr && !handleCallback(handle)) { return (size_t)(-1); } return reinterpret_cast<size_t>(handle); } extern "C" DLL_EXPORT void* __stdcall Ret(void* handleValue) { return handleValue; } extern "C" DLL_EXPORT void __stdcall Out(void* handleValue, void** pHandle) { if (pHandle == nullptr) { return; } *pHandle = handleValue; } extern "C" DLL_EXPORT size_t __stdcall Ref(void** pHandle, HandleCallback handleCallback) { if (handleCallback != nullptr && !handleCallback(*pHandle)) { return (size_t)(-1); } return reinterpret_cast<size_t>(*pHandle); } extern "C" DLL_EXPORT size_t __stdcall RefModify(void* handleValue, void** pHandle, HandleCallback handleCallback) { if (handleCallback != nullptr && !handleCallback(*pHandle)) { return (size_t)(-1); } void* originalHandle = *pHandle; *pHandle = handleValue; return reinterpret_cast<size_t>(originalHandle); } typedef void(__stdcall *InCallback)(void*); extern "C" DLL_EXPORT void __stdcall InvokeInCallback(InCallback callback, void* handle) { callback(handle); } typedef void(__stdcall *RefCallback)(void**); extern "C" DLL_EXPORT void __stdcall InvokeRefCallback(RefCallback callback, void** pHandle) { callback(pHandle); } typedef void*(__stdcall *RetCallback)(); extern "C" DLL_EXPORT void* __stdcall InvokeRetCallback(RetCallback callback) { return callback(); }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdio.h> #include <xplatform.h> typedef BOOL(__stdcall *HandleCallback)(void*); extern "C" DLL_EXPORT size_t __stdcall In(void* handle, HandleCallback handleCallback) { if (handleCallback != nullptr && !handleCallback(handle)) { return (size_t)(-1); } return reinterpret_cast<size_t>(handle); } extern "C" DLL_EXPORT void* __stdcall Ret(void* handleValue) { return handleValue; } extern "C" DLL_EXPORT void __stdcall Out(void* handleValue, void** pHandle) { if (pHandle == nullptr) { return; } *pHandle = handleValue; } extern "C" DLL_EXPORT size_t __stdcall Ref(void** pHandle, HandleCallback handleCallback) { if (handleCallback != nullptr && !handleCallback(*pHandle)) { return (size_t)(-1); } return reinterpret_cast<size_t>(*pHandle); } extern "C" DLL_EXPORT size_t __stdcall RefModify(void* handleValue, void** pHandle, HandleCallback handleCallback) { if (handleCallback != nullptr && !handleCallback(*pHandle)) { return (size_t)(-1); } void* originalHandle = *pHandle; *pHandle = handleValue; return reinterpret_cast<size_t>(originalHandle); } typedef void(__stdcall *InCallback)(void*); extern "C" DLL_EXPORT void __stdcall InvokeInCallback(InCallback callback, void* handle) { callback(handle); } typedef void(__stdcall *RefCallback)(void**); extern "C" DLL_EXPORT void __stdcall InvokeRefCallback(RefCallback callback, void** pHandle) { callback(pHandle); } typedef void*(__stdcall *RetCallback)(); extern "C" DLL_EXPORT void* __stdcall InvokeRetCallback(RetCallback callback) { return callback(); }
-1
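The native file in the record above is the unmanaged half of the CriticalHandles interop tests; its exports (`In`, `Ret`, `Out`, `Ref`, `RefModify`) just echo handle values back so the managed side can check marshaling. The test's real managed declarations are not shown here, so the following is only a sketch of what a matching P/Invoke could look like: the library name "CriticalHandlesNative", the `DemoCriticalHandle` wrapper, and the choice of `IntPtr` for `size_t` are all assumptions for illustration.

```csharp
using System;
using System.Runtime.InteropServices;

// A throwaway CriticalHandle-derived type used only to show the marshaling shape;
// it does not own a real native resource.
internal sealed class DemoCriticalHandle : CriticalHandle
{
    public DemoCriticalHandle() : base(IntPtr.Zero) { }

    public override bool IsInvalid => handle == IntPtr.Zero;

    // Nothing to free for this demo handle; a real wrapper would release its resource here.
    protected override bool ReleaseHandle() => true;
}

internal static class CriticalHandlesInterop
{
    // Mirrors the native `typedef BOOL(__stdcall *HandleCallback)(void*)`.
    [UnmanagedFunctionPointer(CallingConvention.StdCall)]
    internal delegate bool HandleCallback(IntPtr handle);

    // Mirrors `size_t In(void* handle, HandleCallback handleCallback)`; the library
    // name is assumed, and size_t is declared as the pointer-sized IntPtr.
    [DllImport("CriticalHandlesNative", CallingConvention = CallingConvention.StdCall)]
    internal static extern IntPtr In(DemoCriticalHandle handle, HandleCallback callback);
}
```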
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/jit/jitpch.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdint.h> #include <windows.h> #include <wchar.h> #include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <limits.h> #include <string.h> #include <float.h> #include <share.h> #include <cstdlib> #include <intrin.h> #include "jitconfig.h" #include "jit.h" #include "iallocator.h" #include "hashbv.h" #include "compiler.h" #include "dataflow.h" #include "block.h" #include "jiteh.h" #include "rationalize.h" #include "jitstd.h" #include "ssaconfig.h" #include "blockset.h" #include "bitvec.h" #include "inline.h" #include "objectalloc.h"
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <stdint.h> #include <windows.h> #include <wchar.h> #include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <limits.h> #include <string.h> #include <float.h> #include <share.h> #include <cstdlib> #include <intrin.h> #include "jitconfig.h" #include "jit.h" #include "iallocator.h" #include "hashbv.h" #include "compiler.h" #include "dataflow.h" #include "block.h" #include "jiteh.h" #include "rationalize.h" #include "jitstd.h" #include "ssaconfig.h" #include "blockset.h" #include "bitvec.h" #include "inline.h" #include "objectalloc.h"
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/tests/JIT/Performance/CodeQuality/Math/Functions/Single/AbsSingle.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace Functions { public static partial class MathTests { // Tests Math.Abs(single) over 5000 iterations for the domain -1, +1 private const float absSingleDelta = 0.0004f; private const float absSingleExpectedResult = 2500.03125f; public static void AbsSingleTest() { var result = 0.0f; var value = -1.0f; for (var iteration = 0; iteration < iterations; iteration++) { value += absSingleDelta; result += Math.Abs(value); } var diff = Math.Abs(absSingleExpectedResult - result); if (diff > singleEpsilon) { throw new Exception($"Expected Result {absSingleExpectedResult,10:g9}; Actual Result {result,10:g9}"); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace Functions { public static partial class MathTests { // Tests Math.Abs(single) over 5000 iterations for the domain -1, +1 private const float absSingleDelta = 0.0004f; private const float absSingleExpectedResult = 2500.03125f; public static void AbsSingleTest() { var result = 0.0f; var value = -1.0f; for (var iteration = 0; iteration < iterations; iteration++) { value += absSingleDelta; result += Math.Abs(value); } var diff = Math.Abs(absSingleExpectedResult - result); if (diff > singleEpsilon) { throw new Exception($"Expected Result {absSingleExpectedResult,10:g9}; Actual Result {result,10:g9}"); } } } }
-1
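The baked-in constant 2500.03125f in the AbsSingle test above can be sanity-checked: in exact arithmetic, summing |-1 + 0.0004*k| for k = 1..5000 gives 1249.5 + 1250.5 = 2500 exactly, so the extra 0.03125 is accumulated single-precision rounding (0.0004 is not exactly representable as a float, and the sum is carried in float). A small sketch comparing float and double accumulation, assuming `iterations` is 5000 as the test's comment states:

```csharp
using System;

public static class AbsSingleCheck
{
    public static void Main()
    {
        const int iterations = 5000;   // per the comment in the test above
        const float delta = 0.0004f;

        float fValue = -1.0f, fSum = 0.0f;   // mirrors the test's float accumulation
        double dValue = -1.0, dSum = 0.0;    // the same series in double precision

        for (int i = 0; i < iterations; i++)
        {
            fValue += delta;
            fSum += Math.Abs(fValue);

            dValue += 0.0004;
            dSum += Math.Abs(dValue);
        }

        Console.WriteLine(fSum);   // near the baked-in constant 2500.03125f
        Console.WriteLine(dSum);   // near the exact mathematical value, 2500
    }
}
```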
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/coreclr/pal/tests/palsuite/miscellaneous/InterlockedExchange64/test1/test.cpp
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: test.c ** ** Purpose: InterlockedExchange64() function ** ** **=========================================================*/ /* This test is FINISHED. Note: The biggest feature of this function is that it locks the value before it increments it -- in order to make it so only one thread can access it. But, I really don't have a great test to make sure it's thread safe. Any ideas? */ #include <palsuite.h> #define START_VALUE 0 PALTEST(miscellaneous_InterlockedExchange64_test1_paltest_interlockedexchange64_test1, "miscellaneous/InterlockedExchange64/test1/paltest_interlockedexchange64_test1") { LONGLONG TheValue = START_VALUE; LONGLONG NewValue = 5; LONGLONG TheReturn; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } /* ** Run only on 64 bit platforms */ #if defined(HOST_64BIT) TheReturn = InterlockedExchange64(&TheValue,NewValue); /* Compare the exchanged value with the value we exchanged it with. Should be the same. */ if(TheValue != NewValue) { Fail("ERROR: The value which was exchanged should now be %ll, but " "instead it is %ll.",NewValue,TheValue); } /* Check to make sure it returns the origional number which 'TheValue' was set to. */ if(TheReturn != START_VALUE) { Fail("ERROR: The value returned should be the value before the " "exchange happened, which was %ll, but %ll was returned.", START_VALUE,TheReturn); } #endif // HOST_64BIT PAL_Terminate(); return PASS; }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** Source: test.c ** ** Purpose: InterlockedExchange64() function ** ** **=========================================================*/ /* This test is FINISHED. Note: The biggest feature of this function is that it locks the value before it increments it -- in order to make it so only one thread can access it. But, I really don't have a great test to make sure it's thread safe. Any ideas? */ #include <palsuite.h> #define START_VALUE 0 PALTEST(miscellaneous_InterlockedExchange64_test1_paltest_interlockedexchange64_test1, "miscellaneous/InterlockedExchange64/test1/paltest_interlockedexchange64_test1") { LONGLONG TheValue = START_VALUE; LONGLONG NewValue = 5; LONGLONG TheReturn; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } /* ** Run only on 64 bit platforms */ #if defined(HOST_64BIT) TheReturn = InterlockedExchange64(&TheValue,NewValue); /* Compare the exchanged value with the value we exchanged it with. Should be the same. */ if(TheValue != NewValue) { Fail("ERROR: The value which was exchanged should now be %ll, but " "instead it is %ll.",NewValue,TheValue); } /* Check to make sure it returns the origional number which 'TheValue' was set to. */ if(TheReturn != START_VALUE) { Fail("ERROR: The value returned should be the value before the " "exchange happened, which was %ll, but %ll was returned.", START_VALUE,TheReturn); } #endif // HOST_64BIT PAL_Terminate(); return PASS; }
-1
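The PAL test in the record above exercises the native InterlockedExchange64 and asserts two things: the target now holds the new value, and the call returns the original value. The managed counterpart with the same contract is `Interlocked.Exchange(ref long, long)`; a minimal sketch:

```csharp
using System;
using System.Threading;

public static class InterlockedExchangeDemo
{
    public static void Main()
    {
        long value = 0;   // START_VALUE in the PAL test above
        long previous = Interlocked.Exchange(ref value, 5);

        Console.WriteLine(previous);  // 0 - the original value is returned
        Console.WriteLine(value);     // 5 - the new value is now stored
    }
}
```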
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M12-Beta2/b53980/b53980.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> <Optimize>True</Optimize> </PropertyGroup> <ItemGroup> <Compile Include="$(MSBuildProjectName).il" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/tests/JIT/CodeGenBringUpTests/LocallocCnstB1_PSP.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; using System.Runtime.CompilerServices; public class BringUpTest_LocallocCnstB1_PSP { const int Pass = 100; const int Fail = -1; // Reduce all values to byte [MethodImplAttribute(MethodImplOptions.NoInlining)] public static unsafe bool CHECK(byte check, byte expected) { return check == expected; } [MethodImplAttribute(MethodImplOptions.NoInlining)] public static unsafe int LocallocCnstB1_PSP() { byte* a = stackalloc byte[1]; int i; for (i = 0; i < 1; i++) { a[i] = (byte) i; } i = 0; try { for (; i < 1; i++) { if (!CHECK(a[i], (byte) i)) return i; } } catch { Console.WriteLine("ERROR!!!"); return i; } return -1; } public static int Main() { int ret; ret = LocallocCnstB1_PSP(); if (ret != -1) { Console.WriteLine("LocallocCnstB1_PSP: Failed on index: " + ret); return Fail; } return Pass; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; using System.Runtime.CompilerServices; public class BringUpTest_LocallocCnstB1_PSP { const int Pass = 100; const int Fail = -1; // Reduce all values to byte [MethodImplAttribute(MethodImplOptions.NoInlining)] public static unsafe bool CHECK(byte check, byte expected) { return check == expected; } [MethodImplAttribute(MethodImplOptions.NoInlining)] public static unsafe int LocallocCnstB1_PSP() { byte* a = stackalloc byte[1]; int i; for (i = 0; i < 1; i++) { a[i] = (byte) i; } i = 0; try { for (; i < 1; i++) { if (!CHECK(a[i], (byte) i)) return i; } } catch { Console.WriteLine("ERROR!!!"); return i; } return -1; } public static int Main() { int ret; ret = LocallocCnstB1_PSP(); if (ret != -1) { Console.WriteLine("LocallocCnstB1_PSP: Failed on index: " + ret); return Fail; } return Pass; } }
-1
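The LocallocCnstB1_PSP test above stackallocs a buffer, writes each index into it, and returns -1 only if every element reads back correctly. The same round-trip shape can be written without an unsafe context using the span form of stackalloc; this sketch is only an illustration of the construct being exercised, not part of the test:

```csharp
using System;

public static class StackallocSketch
{
    public static int RoundTrip()
    {
        // Span-based stackalloc: same write-then-verify pattern as the test above.
        Span<byte> buffer = stackalloc byte[8];

        for (int i = 0; i < buffer.Length; i++)
        {
            buffer[i] = (byte)i;
        }

        for (int i = 0; i < buffer.Length; i++)
        {
            if (buffer[i] != (byte)i)
            {
                return i; // index of the first mismatch
            }
        }

        return -1; // -1 means every element round-tripped, as in the test
    }

    public static void Main() => Console.WriteLine(RoundTrip());
}
```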
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/libraries/System.Configuration.ConfigurationManager/src/System/Configuration/NameValueFileSectionHandler.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Configuration.Internal; using System.IO; using System.Xml; namespace System.Configuration { /// <summary> /// This section handler allows &lt;appSettings file="user.config" /&gt; /// The file pointed to by the file= attribute is read as if it is /// an appSettings section in the config file. /// Note: the user.config file must have its root element match the /// section referring to it. So if appSettings has a file="user.config" /// attribute the root element in user.config must also be named appSettings. /// </summary> public class NameValueFileSectionHandler : IConfigurationSectionHandler { public object Create(object parent, object configContext, XmlNode section) { object result = parent; // parse XML XmlNode fileAttribute = section.Attributes.RemoveNamedItem("file"); result = NameValueSectionHandler.CreateStatic(result, section); if (fileAttribute != null && fileAttribute.Value.Length != 0) { string filename; filename = fileAttribute.Value; IConfigErrorInfo configXmlNode = fileAttribute as IConfigErrorInfo; if (configXmlNode == null) { return null; } string configFile = configXmlNode.Filename; string directory = Path.GetDirectoryName(configFile); string sourceFileFullPath = Path.Combine(directory, filename); if (File.Exists(sourceFileFullPath)) { ConfigXmlDocument doc = new ConfigXmlDocument(); try { doc.Load(sourceFileFullPath); } catch (XmlException e) { throw new ConfigurationErrorsException(e.Message, e, sourceFileFullPath, e.LineNumber); } if (section.Name != doc.DocumentElement.Name) { throw new ConfigurationErrorsException( SR.Format(SR.Config_name_value_file_section_file_invalid_root, section.Name), doc.DocumentElement); } result = NameValueSectionHandler.CreateStatic(result, doc.DocumentElement); } } return result; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Configuration.Internal; using System.IO; using System.Xml; namespace System.Configuration { /// <summary> /// This section handler allows &lt;appSettings file="user.config" /&gt; /// The file pointed to by the file= attribute is read as if it is /// an appSettings section in the config file. /// Note: the user.config file must have its root element match the /// section referring to it. So if appSettings has a file="user.config" /// attribute the root element in user.config must also be named appSettings. /// </summary> public class NameValueFileSectionHandler : IConfigurationSectionHandler { public object Create(object parent, object configContext, XmlNode section) { object result = parent; // parse XML XmlNode fileAttribute = section.Attributes.RemoveNamedItem("file"); result = NameValueSectionHandler.CreateStatic(result, section); if (fileAttribute != null && fileAttribute.Value.Length != 0) { string filename; filename = fileAttribute.Value; IConfigErrorInfo configXmlNode = fileAttribute as IConfigErrorInfo; if (configXmlNode == null) { return null; } string configFile = configXmlNode.Filename; string directory = Path.GetDirectoryName(configFile); string sourceFileFullPath = Path.Combine(directory, filename); if (File.Exists(sourceFileFullPath)) { ConfigXmlDocument doc = new ConfigXmlDocument(); try { doc.Load(sourceFileFullPath); } catch (XmlException e) { throw new ConfigurationErrorsException(e.Message, e, sourceFileFullPath, e.LineNumber); } if (section.Name != doc.DocumentElement.Name) { throw new ConfigurationErrorsException( SR.Format(SR.Config_name_value_file_section_file_invalid_root, section.Name), doc.DocumentElement); } result = NameValueSectionHandler.CreateStatic(result, doc.DocumentElement); } } return result; } } }
-1
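The NameValueFileSectionHandler in the record above merges an external file referenced by the file= attribute into the referring section. As a minimal illustrative sketch (not part of the record), consuming code might look like the following; the config contents shown in the comment and the class name FileSectionSketch are assumptions for illustration only.

// Illustrative only. Assumes an App.config like the following (shown as a comment),
// where appSettings carries file="user.config" and user.config's root element is also
// <appSettings>, as the handler above requires:
//
//   <!-- App.config -->
//   <configuration>
//     <appSettings file="user.config">
//       <add key="BaseSetting" value="from App.config" />
//     </appSettings>
//   </configuration>
//
//   <!-- user.config (root element must match the referring section) -->
//   <appSettings>
//     <add key="UserSetting" value="from user.config" />
//   </appSettings>
using System;
using System.Configuration;   // System.Configuration.ConfigurationManager package

public static class FileSectionSketch
{
    public static void Main()
    {
        // Keys from both files are merged; entries from user.config can override inline keys.
        Console.WriteLine(ConfigurationManager.AppSettings["BaseSetting"]);
        Console.WriteLine(ConfigurationManager.AppSettings["UserSetting"]);
    }
}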
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/ShiftLeftLogicalSaturate.Vector64.Byte.1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftLeftLogicalSaturate_Vector64_Byte_1() { var test = new ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1 { private struct DataTable { private byte[] inArray; private byte[] outArray; private GCHandle inHandle; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray, Byte[] outArray, int alignment) { int sizeOfinArray = inArray.Length * Unsafe.SizeOf<Byte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray = new byte[alignment * 2]; 
this.outArray = new byte[alignment * 2]; this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<Byte, byte>(ref inArray[0]), (uint)sizeOfinArray); } public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Byte> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref testStruct._fld), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1 testClass) { var result = AdvSimd.ShiftLeftLogicalSaturate(_fld, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1 testClass) { fixed (Vector64<Byte>* pFld = &_fld) { var result = AdvSimd.ShiftLeftLogicalSaturate( AdvSimd.LoadVector64((Byte*)(pFld)), 1 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Byte>>() / sizeof(Byte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Byte>>() / sizeof(Byte); private static readonly byte Imm = 1; private static Byte[] _data = new Byte[Op1ElementCount]; private static Vector64<Byte> _clsVar; private Vector64<Byte> _fld; private DataTable _dataTable; static ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref _clsVar), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); } public ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref _fld), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetByte(); } _dataTable = new DataTable(_data, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftLeftLogicalSaturate( Unsafe.Read<Vector64<Byte>>(_dataTable.inArrayPtr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftLeftLogicalSaturate( AdvSimd.LoadVector64((Byte*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLeftLogicalSaturate), new Type[] { typeof(Vector64<Byte>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Byte>>(_dataTable.inArrayPtr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Byte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLeftLogicalSaturate), new Type[] { typeof(Vector64<Byte>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Byte*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Byte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftLeftLogicalSaturate( _clsVar, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Byte>* pClsVar = &_clsVar) { var result = AdvSimd.ShiftLeftLogicalSaturate( AdvSimd.LoadVector64((Byte*)(pClsVar)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector64<Byte>>(_dataTable.inArrayPtr); var result = AdvSimd.ShiftLeftLogicalSaturate(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = AdvSimd.LoadVector64((Byte*)(_dataTable.inArrayPtr)); var result = AdvSimd.ShiftLeftLogicalSaturate(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1(); var result = AdvSimd.ShiftLeftLogicalSaturate(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1(); fixed (Vector64<Byte>* pFld = &test._fld) { var result = AdvSimd.ShiftLeftLogicalSaturate( AdvSimd.LoadVector64((Byte*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftLeftLogicalSaturate(_fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, 
_dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Byte>* pFld = &_fld) { var result = AdvSimd.ShiftLeftLogicalSaturate( AdvSimd.LoadVector64((Byte*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftLeftLogicalSaturate(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftLeftLogicalSaturate( AdvSimd.LoadVector64((Byte*)(&test._fld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Byte> firstOp, void* result, [CallerMemberName] string method = "") { Byte[] inArray = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Byte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Byte[] inArray = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector64<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Byte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(Byte[] firstOp, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftLeftLogicalSaturate(firstOp[i], Imm) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftLeftLogicalSaturate)}<Byte>(Vector64<Byte>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftLeftLogicalSaturate_Vector64_Byte_1() { var test = new ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1 { private struct DataTable { private byte[] inArray; private byte[] outArray; private GCHandle inHandle; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray, Byte[] outArray, int alignment) { int sizeOfinArray = inArray.Length * Unsafe.SizeOf<Byte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray = new byte[alignment * 2]; 
this.outArray = new byte[alignment * 2]; this.inHandle = GCHandle.Alloc(this.inArray, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArrayPtr), ref Unsafe.As<Byte, byte>(ref inArray[0]), (uint)sizeOfinArray); } public void* inArrayPtr => Align((byte*)(inHandle.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Byte> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref testStruct._fld), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1 testClass) { var result = AdvSimd.ShiftLeftLogicalSaturate(_fld, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1 testClass) { fixed (Vector64<Byte>* pFld = &_fld) { var result = AdvSimd.ShiftLeftLogicalSaturate( AdvSimd.LoadVector64((Byte*)(pFld)), 1 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Byte>>() / sizeof(Byte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Byte>>() / sizeof(Byte); private static readonly byte Imm = 1; private static Byte[] _data = new Byte[Op1ElementCount]; private static Vector64<Byte> _clsVar; private Vector64<Byte> _fld; private DataTable _dataTable; static ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref _clsVar), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); } public ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref _fld), ref Unsafe.As<Byte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = TestLibrary.Generator.GetByte(); } _dataTable = new DataTable(_data, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftLeftLogicalSaturate( Unsafe.Read<Vector64<Byte>>(_dataTable.inArrayPtr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftLeftLogicalSaturate( AdvSimd.LoadVector64((Byte*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLeftLogicalSaturate), new Type[] { typeof(Vector64<Byte>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Byte>>(_dataTable.inArrayPtr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Byte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLeftLogicalSaturate), new Type[] { typeof(Vector64<Byte>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Byte*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Byte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftLeftLogicalSaturate( _clsVar, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Byte>* pClsVar = &_clsVar) { var result = AdvSimd.ShiftLeftLogicalSaturate( AdvSimd.LoadVector64((Byte*)(pClsVar)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector64<Byte>>(_dataTable.inArrayPtr); var result = AdvSimd.ShiftLeftLogicalSaturate(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = AdvSimd.LoadVector64((Byte*)(_dataTable.inArrayPtr)); var result = AdvSimd.ShiftLeftLogicalSaturate(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1(); var result = AdvSimd.ShiftLeftLogicalSaturate(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new ImmUnaryOpTest__ShiftLeftLogicalSaturate_Vector64_Byte_1(); fixed (Vector64<Byte>* pFld = &test._fld) { var result = AdvSimd.ShiftLeftLogicalSaturate( AdvSimd.LoadVector64((Byte*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftLeftLogicalSaturate(_fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, 
_dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Byte>* pFld = &_fld) { var result = AdvSimd.ShiftLeftLogicalSaturate( AdvSimd.LoadVector64((Byte*)(pFld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftLeftLogicalSaturate(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftLeftLogicalSaturate( AdvSimd.LoadVector64((Byte*)(&test._fld)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Byte> firstOp, void* result, [CallerMemberName] string method = "") { Byte[] inArray = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Byte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { Byte[] inArray = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector64<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Byte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(Byte[] firstOp, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftLeftLogicalSaturate(firstOp[i], Imm) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftLeftLogicalSaturate)}<Byte>(Vector64<Byte>, 1): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
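The test in the record above validates AdvSimd.ShiftLeftLogicalSaturate against Helpers.ShiftLeftLogicalSaturate, which is defined elsewhere in the test tree and not shown here. As an illustrative aside, a plausible scalar reference for the Byte overload (AArch64 UQSHL semantics: shift left, clamp to the type's maximum on overflow) might look like the sketch below; the class name SaturateSketch and the exact helper behavior are assumptions.

// Illustrative scalar reference only; the real Helpers.ShiftLeftLogicalSaturate used by
// the test lives elsewhere. Semantics assumed here: unsigned saturating left shift --
// if any set bit would be shifted out, the result clamps to Byte.MaxValue.
public static class SaturateSketch
{
    public static byte ShiftLeftLogicalSaturate(byte value, byte shift)
    {
        int widened = value << shift;              // compute in a wider type to detect overflow
        return widened > byte.MaxValue ? byte.MaxValue : (byte)widened;
    }
}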
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/native/external/zlib-intel/x86.c
/* * x86 feature check * * Copyright (C) 2013 Intel Corporation. All rights reserved. * Author: * Jim Kukunas * * For conditions of distribution and use, see copyright notice in zlib.h */ #include "x86.h" #ifdef ZLIB_X86 int x86_cpu_has_sse2; int x86_cpu_has_sse42; int x86_cpu_has_pclmul; #ifdef _MSC_VER #include <intrin.h> #else #include <cpuid.h> #endif #ifndef bit_SSE2 # define bit_SSE2 0x4000000 #endif #ifndef bit_SSE4_2 # define bit_SSE4_2 0x100000 #endif #ifndef bit_PCLMUL # define bit_PCLMUL 0x2 #endif void x86_check_features(void) { static int once; enum reg { A = 0, B = 1, C = 2, D = 3}; int regs[4]; if (once != 0) return; once = 1; #ifdef _MSC_VER __cpuid(regs, 1); #else __cpuid(1, regs[A], regs[B], regs[C], regs[D]); #endif x86_cpu_has_sse2 = regs[D] & bit_SSE2; x86_cpu_has_sse42= regs[C] & bit_SSE4_2; x86_cpu_has_pclmul=regs[C] & bit_PCLMUL; } #endif
/* * x86 feature check * * Copyright (C) 2013 Intel Corporation. All rights reserved. * Author: * Jim Kukunas * * For conditions of distribution and use, see copyright notice in zlib.h */ #include "x86.h" #ifdef ZLIB_X86 int x86_cpu_has_sse2; int x86_cpu_has_sse42; int x86_cpu_has_pclmul; #ifdef _MSC_VER #include <intrin.h> #else #include <cpuid.h> #endif #ifndef bit_SSE2 # define bit_SSE2 0x4000000 #endif #ifndef bit_SSE4_2 # define bit_SSE4_2 0x100000 #endif #ifndef bit_PCLMUL # define bit_PCLMUL 0x2 #endif void x86_check_features(void) { static int once; enum reg { A = 0, B = 1, C = 2, D = 3}; int regs[4]; if (once != 0) return; once = 1; #ifdef _MSC_VER __cpuid(regs, 1); #else __cpuid(1, regs[A], regs[B], regs[C], regs[D]); #endif x86_cpu_has_sse2 = regs[D] & bit_SSE2; x86_cpu_has_sse42= regs[C] & bit_SSE4_2; x86_cpu_has_pclmul=regs[C] & bit_PCLMUL; } #endif
-1
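The zlib-intel x86.c in the record above probes SSE2, SSE4.2 and PCLMULQDQ via CPUID. As an illustrative aside, managed code can ask the same questions through the IsSupported properties in System.Runtime.Intrinsics.X86, which the JIT treats as compile-time constants; the class name CpuFeatureSketch is hypothetical.

// Illustrative only -- managed analogue of the CPUID feature checks above.
using System;
using System.Runtime.Intrinsics.X86;

public static class CpuFeatureSketch
{
    public static void Main()
    {
        Console.WriteLine($"SSE2:      {Sse2.IsSupported}");
        Console.WriteLine($"SSE4.2:    {Sse42.IsSupported}");
        Console.WriteLine($"PCLMULQDQ: {Pclmulqdq.IsSupported}");
    }
}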
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./eng/common/cross/arm64/sources.list.stretch
deb http://deb.debian.org/debian stretch main deb-src http://deb.debian.org/debian stretch main deb http://deb.debian.org/debian-security/ stretch/updates main deb-src http://deb.debian.org/debian-security/ stretch/updates main deb http://deb.debian.org/debian stretch-updates main deb-src http://deb.debian.org/debian stretch-updates main deb http://deb.debian.org/debian stretch-backports main contrib non-free deb-src http://deb.debian.org/debian stretch-backports main contrib non-free
deb http://deb.debian.org/debian stretch main deb-src http://deb.debian.org/debian stretch main deb http://deb.debian.org/debian-security/ stretch/updates main deb-src http://deb.debian.org/debian-security/ stretch/updates main deb http://deb.debian.org/debian stretch-updates main deb-src http://deb.debian.org/debian stretch-updates main deb http://deb.debian.org/debian stretch-backports main contrib non-free deb-src http://deb.debian.org/debian stretch-backports main contrib non-free
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/native/eventpipe/ds-server.c
#include "ds-rt-config.h" #ifdef ENABLE_PERFTRACING #if !defined(DS_INCLUDE_SOURCE_FILES) || defined(DS_FORCE_INCLUDE_SOURCE_FILES) #define DS_IMPL_SERVER_GETTER_SETTER #include "ds-server.h" #include "ds-ipc.h" #include "ds-protocol.h" #include "ds-process-protocol.h" #include "ds-eventpipe-protocol.h" #include "ds-dump-protocol.h" #include "ds-profiler-protocol.h" #include "ds-rt.h" /* * Globals and volatile access functions. */ static volatile uint32_t _server_shutting_down_state = 0; static ep_rt_wait_event_handle_t _server_resume_runtime_startup_event = { 0 }; static bool _server_disabled = false; static volatile bool _is_paused_for_startup = false; static inline bool server_volatile_load_shutting_down_state (void) { return (ep_rt_volatile_load_uint32_t (&_server_shutting_down_state) != 0) ? true : false; } static inline void server_volatile_store_shutting_down_state (bool state) { ep_rt_volatile_store_uint32_t (&_server_shutting_down_state, state ? 1 : 0); } /* * Forward declares of all static functions. */ static void server_error_callback_create ( const ep_char8_t *message, uint32_t code); static void server_error_callback_close ( const ep_char8_t *message, uint32_t code); static void server_warning_callback ( const ep_char8_t *message, uint32_t code); static bool server_protocol_helper_unknown_command ( DiagnosticsIpcMessage *message, DiagnosticsIpcStream *stream); /* * DiagnosticServer. */ static void server_error_callback_create ( const ep_char8_t *message, uint32_t code) { EP_ASSERT (message != NULL); DS_LOG_ERROR_2 ("Failed to create diagnostic IPC: error (%d): %s.", code, message); } static void server_error_callback_close ( const ep_char8_t *message, uint32_t code) { EP_ASSERT (message != NULL); DS_LOG_ERROR_2 ("Failed to close diagnostic IPC: error (%d): %s.", code, message); } static bool server_protocol_helper_unknown_command ( DiagnosticsIpcMessage *message, DiagnosticsIpcStream *stream) { DS_LOG_WARNING_1 ("Received unknown request type (%d)", ds_ipc_header_get_commandset (ds_ipc_message_get_header_ref (message))); ds_ipc_message_send_error (stream, DS_IPC_E_UNKNOWN_COMMAND); ds_ipc_stream_free (stream); return true; } static void server_warning_callback ( const ep_char8_t *message, uint32_t code) { EP_ASSERT (message != NULL); DS_LOG_WARNING_2 ("warning (%d): %s.", code, message); } EP_RT_DEFINE_THREAD_FUNC (server_thread) { EP_ASSERT (server_volatile_load_shutting_down_state () || ds_ipc_stream_factory_has_active_ports ()); if (!ds_ipc_stream_factory_has_active_ports ()) { #ifndef DS_IPC_DISABLE_LISTEN_PORTS DS_LOG_ERROR_0 ("Diagnostics IPC listener was undefined"); #endif return 1; } while (!server_volatile_load_shutting_down_state ()) { DiagnosticsIpcStream *stream = ds_ipc_stream_factory_get_next_available_stream (server_warning_callback); if (!stream) continue; ds_rt_auto_trace_signal (); DiagnosticsIpcMessage message; if (!ds_ipc_message_init (&message)) continue; if (!ds_ipc_message_initialize_stream (&message, stream)) { ds_ipc_message_send_error (stream, DS_IPC_E_BAD_ENCODING); ds_ipc_stream_free (stream); ds_ipc_message_fini (&message); continue; } if (ep_rt_utf8_string_compare ( (const ep_char8_t *)ds_ipc_header_get_magic_ref (ds_ipc_message_get_header_ref (&message)), (const ep_char8_t *)DOTNET_IPC_V1_MAGIC) != 0) { ds_ipc_message_send_error (stream, DS_IPC_E_UNKNOWN_MAGIC); ds_ipc_stream_free (stream); ds_ipc_message_fini (&message); continue; } DS_LOG_INFO_2 ("DiagnosticServer - received IPC message with command set (%d) and command id (%d)", 
ds_ipc_header_get_commandset (ds_ipc_message_get_header_ref (&message)), ds_ipc_header_get_commandid (ds_ipc_message_get_header_ref (&message))); switch ((DiagnosticsServerCommandSet)ds_ipc_header_get_commandset (ds_ipc_message_get_header_ref (&message))) { case DS_SERVER_COMMANDSET_EVENTPIPE: ds_eventpipe_protocol_helper_handle_ipc_message (&message, stream); break; case DS_SERVER_COMMANDSET_DUMP: ds_dump_protocol_helper_handle_ipc_message (&message, stream); break; case DS_SERVER_COMMANDSET_PROCESS: ds_process_protocol_helper_handle_ipc_message (&message, stream); break; case DS_SERVER_COMMANDSET_PROFILER: ds_profiler_protocol_helper_handle_ipc_message (&message, stream); break; default: server_protocol_helper_unknown_command (&message, stream); break; } ds_ipc_message_fini (&message); } return (ep_rt_thread_start_func_return_t)0; } void ds_server_disable (void) { _server_disabled = true; } bool ds_server_init (void) { if (!ds_ipc_stream_factory_init ()) return false; if (_server_disabled || !ds_rt_config_value_get_enable ()) return true; bool result = false; // Initialize PAL layer. if (!ds_ipc_pal_init ()) { DS_LOG_ERROR_1 ("Failed to initialize PAL layer (%d).", ep_rt_get_last_error ()); ep_raise_error (); } // Initialize the RuntimeIndentifier before use ds_ipc_advertise_cookie_v1_init (); // Ports can fail to be configured if (!ds_ipc_stream_factory_configure (server_error_callback_create)) DS_LOG_ERROR_0 ("At least one Diagnostic Port failed to be configured."); if (ds_ipc_stream_factory_any_suspended_ports ()) { ep_rt_wait_event_alloc (&_server_resume_runtime_startup_event, true, false); ep_raise_error_if_nok (ep_rt_wait_event_is_valid (&_server_resume_runtime_startup_event)); } if (ds_ipc_stream_factory_has_active_ports ()) { ds_rt_auto_trace_init (); ds_rt_auto_trace_launch (); ep_rt_thread_id_t thread_id = ep_rt_uint64_t_to_thread_id_t (0); if (!ep_rt_thread_create ((void *)server_thread, NULL, EP_THREAD_TYPE_SERVER, (void *)&thread_id)) { // Failed to create IPC thread. 
ds_ipc_stream_factory_close_ports (NULL); DS_LOG_ERROR_1 ("Failed to create diagnostic server thread (%d).", ep_rt_get_last_error ()); ep_raise_error (); } else { ds_rt_auto_trace_wait (); } } result = true; ep_on_exit: return result; ep_on_error: EP_ASSERT (!result); ep_exit_error_handler (); } bool ds_server_shutdown (void) { server_volatile_store_shutting_down_state (true); if (ds_ipc_stream_factory_has_active_ports ()) ds_ipc_stream_factory_shutdown (server_error_callback_close); ds_ipc_stream_factory_fini (); ds_ipc_pal_shutdown (); return true; } // This method will block runtime bring-up IFF DOTNET_DefaultDiagnosticPortSuspend != NULL and DOTNET_DiagnosticPorts != 0 (it's default state) // The _ds_resume_runtime_startup_event event will be signaled when the Diagnostics Monitor uses the ResumeRuntime Diagnostics IPC Command void ds_server_pause_for_diagnostics_monitor (void) { _is_paused_for_startup = true; if (ds_ipc_stream_factory_any_suspended_ports ()) { EP_ASSERT (ep_rt_wait_event_is_valid (&_server_resume_runtime_startup_event)); DS_LOG_ALWAYS_0 ("The runtime has been configured to pause during startup and is awaiting a Diagnostics IPC ResumeStartup command."); if (ep_rt_wait_event_wait (&_server_resume_runtime_startup_event, 5000, false) != 0) { ds_rt_server_log_pause_message (); DS_LOG_ALWAYS_0 ("The runtime has been configured to pause during startup and is awaiting a Diagnostics IPC ResumeStartup command and has waited 5 seconds."); ep_rt_wait_event_wait (&_server_resume_runtime_startup_event, EP_INFINITE_WAIT, false); } } // allow wait failures to fall through and the runtime to continue coming up } void ds_server_resume_runtime_startup (void) { ds_ipc_stream_factory_resume_current_port (); if (!ds_ipc_stream_factory_any_suspended_ports () && ep_rt_wait_event_is_valid (&_server_resume_runtime_startup_event)) { ep_rt_wait_event_set (&_server_resume_runtime_startup_event); _is_paused_for_startup = false; } } bool ds_server_is_paused_in_startup (void) { return _is_paused_for_startup; } #endif /* !defined(DS_INCLUDE_SOURCE_FILES) || defined(DS_FORCE_INCLUDE_SOURCE_FILES) */ #endif /* ENABLE_PERFTRACING */ #ifndef DS_INCLUDE_SOURCE_FILES extern const char quiet_linker_empty_file_warning_diagnostics_server; const char quiet_linker_empty_file_warning_diagnostics_server = 0; #endif
#include "ds-rt-config.h" #ifdef ENABLE_PERFTRACING #if !defined(DS_INCLUDE_SOURCE_FILES) || defined(DS_FORCE_INCLUDE_SOURCE_FILES) #define DS_IMPL_SERVER_GETTER_SETTER #include "ds-server.h" #include "ds-ipc.h" #include "ds-protocol.h" #include "ds-process-protocol.h" #include "ds-eventpipe-protocol.h" #include "ds-dump-protocol.h" #include "ds-profiler-protocol.h" #include "ds-rt.h" /* * Globals and volatile access functions. */ static volatile uint32_t _server_shutting_down_state = 0; static ep_rt_wait_event_handle_t _server_resume_runtime_startup_event = { 0 }; static bool _server_disabled = false; static volatile bool _is_paused_for_startup = false; static inline bool server_volatile_load_shutting_down_state (void) { return (ep_rt_volatile_load_uint32_t (&_server_shutting_down_state) != 0) ? true : false; } static inline void server_volatile_store_shutting_down_state (bool state) { ep_rt_volatile_store_uint32_t (&_server_shutting_down_state, state ? 1 : 0); } /* * Forward declares of all static functions. */ static void server_error_callback_create ( const ep_char8_t *message, uint32_t code); static void server_error_callback_close ( const ep_char8_t *message, uint32_t code); static void server_warning_callback ( const ep_char8_t *message, uint32_t code); static bool server_protocol_helper_unknown_command ( DiagnosticsIpcMessage *message, DiagnosticsIpcStream *stream); /* * DiagnosticServer. */ static void server_error_callback_create ( const ep_char8_t *message, uint32_t code) { EP_ASSERT (message != NULL); DS_LOG_ERROR_2 ("Failed to create diagnostic IPC: error (%d): %s.", code, message); } static void server_error_callback_close ( const ep_char8_t *message, uint32_t code) { EP_ASSERT (message != NULL); DS_LOG_ERROR_2 ("Failed to close diagnostic IPC: error (%d): %s.", code, message); } static bool server_protocol_helper_unknown_command ( DiagnosticsIpcMessage *message, DiagnosticsIpcStream *stream) { DS_LOG_WARNING_1 ("Received unknown request type (%d)", ds_ipc_header_get_commandset (ds_ipc_message_get_header_ref (message))); ds_ipc_message_send_error (stream, DS_IPC_E_UNKNOWN_COMMAND); ds_ipc_stream_free (stream); return true; } static void server_warning_callback ( const ep_char8_t *message, uint32_t code) { EP_ASSERT (message != NULL); DS_LOG_WARNING_2 ("warning (%d): %s.", code, message); } EP_RT_DEFINE_THREAD_FUNC (server_thread) { EP_ASSERT (server_volatile_load_shutting_down_state () || ds_ipc_stream_factory_has_active_ports ()); if (!ds_ipc_stream_factory_has_active_ports ()) { #ifndef DS_IPC_DISABLE_LISTEN_PORTS DS_LOG_ERROR_0 ("Diagnostics IPC listener was undefined"); #endif return 1; } while (!server_volatile_load_shutting_down_state ()) { DiagnosticsIpcStream *stream = ds_ipc_stream_factory_get_next_available_stream (server_warning_callback); if (!stream) continue; ds_rt_auto_trace_signal (); DiagnosticsIpcMessage message; if (!ds_ipc_message_init (&message)) continue; if (!ds_ipc_message_initialize_stream (&message, stream)) { ds_ipc_message_send_error (stream, DS_IPC_E_BAD_ENCODING); ds_ipc_stream_free (stream); ds_ipc_message_fini (&message); continue; } if (ep_rt_utf8_string_compare ( (const ep_char8_t *)ds_ipc_header_get_magic_ref (ds_ipc_message_get_header_ref (&message)), (const ep_char8_t *)DOTNET_IPC_V1_MAGIC) != 0) { ds_ipc_message_send_error (stream, DS_IPC_E_UNKNOWN_MAGIC); ds_ipc_stream_free (stream); ds_ipc_message_fini (&message); continue; } DS_LOG_INFO_2 ("DiagnosticServer - received IPC message with command set (%d) and command id (%d)", 
ds_ipc_header_get_commandset (ds_ipc_message_get_header_ref (&message)), ds_ipc_header_get_commandid (ds_ipc_message_get_header_ref (&message))); switch ((DiagnosticsServerCommandSet)ds_ipc_header_get_commandset (ds_ipc_message_get_header_ref (&message))) { case DS_SERVER_COMMANDSET_EVENTPIPE: ds_eventpipe_protocol_helper_handle_ipc_message (&message, stream); break; case DS_SERVER_COMMANDSET_DUMP: ds_dump_protocol_helper_handle_ipc_message (&message, stream); break; case DS_SERVER_COMMANDSET_PROCESS: ds_process_protocol_helper_handle_ipc_message (&message, stream); break; case DS_SERVER_COMMANDSET_PROFILER: ds_profiler_protocol_helper_handle_ipc_message (&message, stream); break; default: server_protocol_helper_unknown_command (&message, stream); break; } ds_ipc_message_fini (&message); } return (ep_rt_thread_start_func_return_t)0; } void ds_server_disable (void) { _server_disabled = true; } bool ds_server_init (void) { if (!ds_ipc_stream_factory_init ()) return false; if (_server_disabled || !ds_rt_config_value_get_enable ()) return true; bool result = false; // Initialize PAL layer. if (!ds_ipc_pal_init ()) { DS_LOG_ERROR_1 ("Failed to initialize PAL layer (%d).", ep_rt_get_last_error ()); ep_raise_error (); } // Initialize the RuntimeIndentifier before use ds_ipc_advertise_cookie_v1_init (); // Ports can fail to be configured if (!ds_ipc_stream_factory_configure (server_error_callback_create)) DS_LOG_ERROR_0 ("At least one Diagnostic Port failed to be configured."); if (ds_ipc_stream_factory_any_suspended_ports ()) { ep_rt_wait_event_alloc (&_server_resume_runtime_startup_event, true, false); ep_raise_error_if_nok (ep_rt_wait_event_is_valid (&_server_resume_runtime_startup_event)); } if (ds_ipc_stream_factory_has_active_ports ()) { ds_rt_auto_trace_init (); ds_rt_auto_trace_launch (); ep_rt_thread_id_t thread_id = ep_rt_uint64_t_to_thread_id_t (0); if (!ep_rt_thread_create ((void *)server_thread, NULL, EP_THREAD_TYPE_SERVER, (void *)&thread_id)) { // Failed to create IPC thread. 
ds_ipc_stream_factory_close_ports (NULL); DS_LOG_ERROR_1 ("Failed to create diagnostic server thread (%d).", ep_rt_get_last_error ()); ep_raise_error (); } else { ds_rt_auto_trace_wait (); } } result = true; ep_on_exit: return result; ep_on_error: EP_ASSERT (!result); ep_exit_error_handler (); } bool ds_server_shutdown (void) { server_volatile_store_shutting_down_state (true); if (ds_ipc_stream_factory_has_active_ports ()) ds_ipc_stream_factory_shutdown (server_error_callback_close); ds_ipc_stream_factory_fini (); ds_ipc_pal_shutdown (); return true; } // This method will block runtime bring-up IFF DOTNET_DefaultDiagnosticPortSuspend != NULL and DOTNET_DiagnosticPorts != 0 (it's default state) // The _ds_resume_runtime_startup_event event will be signaled when the Diagnostics Monitor uses the ResumeRuntime Diagnostics IPC Command void ds_server_pause_for_diagnostics_monitor (void) { _is_paused_for_startup = true; if (ds_ipc_stream_factory_any_suspended_ports ()) { EP_ASSERT (ep_rt_wait_event_is_valid (&_server_resume_runtime_startup_event)); DS_LOG_ALWAYS_0 ("The runtime has been configured to pause during startup and is awaiting a Diagnostics IPC ResumeStartup command."); if (ep_rt_wait_event_wait (&_server_resume_runtime_startup_event, 5000, false) != 0) { ds_rt_server_log_pause_message (); DS_LOG_ALWAYS_0 ("The runtime has been configured to pause during startup and is awaiting a Diagnostics IPC ResumeStartup command and has waited 5 seconds."); ep_rt_wait_event_wait (&_server_resume_runtime_startup_event, EP_INFINITE_WAIT, false); } } // allow wait failures to fall through and the runtime to continue coming up } void ds_server_resume_runtime_startup (void) { ds_ipc_stream_factory_resume_current_port (); if (!ds_ipc_stream_factory_any_suspended_ports () && ep_rt_wait_event_is_valid (&_server_resume_runtime_startup_event)) { ep_rt_wait_event_set (&_server_resume_runtime_startup_event); _is_paused_for_startup = false; } } bool ds_server_is_paused_in_startup (void) { return _is_paused_for_startup; } #endif /* !defined(DS_INCLUDE_SOURCE_FILES) || defined(DS_FORCE_INCLUDE_SOURCE_FILES) */ #endif /* ENABLE_PERFTRACING */ #ifndef DS_INCLUDE_SOURCE_FILES extern const char quiet_linker_empty_file_warning_diagnostics_server; const char quiet_linker_empty_file_warning_diagnostics_server = 0; #endif
-1
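ds-server.c in the record above implements the runtime side of the suspend-on-startup / ResumeRuntime handshake (ds_server_pause_for_diagnostics_monitor blocks until a ResumeRuntime IPC command arrives). As a hedged sketch of the monitor side, assuming the Microsoft.Diagnostics.NETCore.Client NuGet package and a target started with DOTNET_DefaultDiagnosticPortSuspend=1, a client could send that command as shown below; the class name ResumeRuntimeSketch and the argument handling are illustrative.

// Illustrative monitor-side sketch, assuming the Microsoft.Diagnostics.NETCore.Client package.
using System;
using Microsoft.Diagnostics.NETCore.Client;

public static class ResumeRuntimeSketch
{
    public static void Main(string[] args)
    {
        int targetPid = int.Parse(args[0]);      // pid of the suspended runtime
        var client = new DiagnosticsClient(targetPid);
        client.ResumeRuntime();                  // sends the ResumeRuntime Diagnostics IPC command
        Console.WriteLine($"Sent ResumeRuntime to process {targetPid}.");
    }
}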
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive. depending on number of backedges. Change default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources, for various reasons; if so we fall back to placing them at targets.
./src/tests/JIT/HardwareIntrinsics/X86/Avx2/ShiftRightLogical128BitLane.SByte.1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void ShiftRightLogical128BitLaneSByte1() { var test = new ImmUnaryOpTest__ShiftRightLogical128BitLaneSByte1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftRightLogical128BitLaneSByte1 { private struct TestStruct { public Vector256<SByte> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (sbyte)8; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref testStruct._fld), ref Unsafe.As<SByte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftRightLogical128BitLaneSByte1 testClass) { var result = Avx2.ShiftRightLogical128BitLane(_fld, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<SByte>>() / sizeof(SByte); private static SByte[] _data = new SByte[Op1ElementCount]; private static Vector256<SByte> _clsVar; 
private Vector256<SByte> _fld; private SimpleUnaryOpTest__DataTable<SByte, SByte> _dataTable; static ImmUnaryOpTest__ShiftRightLogical128BitLaneSByte1() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (sbyte)8; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref _clsVar), ref Unsafe.As<SByte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); } public ImmUnaryOpTest__ShiftRightLogical128BitLaneSByte1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (sbyte)8; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref _fld), ref Unsafe.As<SByte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (sbyte)8; } _dataTable = new SimpleUnaryOpTest__DataTable<SByte, SByte>(_data, new SByte[RetElementCount], LargestVectorSize); } public bool IsSupported => Avx2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Avx2.ShiftRightLogical128BitLane( Unsafe.Read<Vector256<SByte>>(_dataTable.inArrayPtr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Avx2.ShiftRightLogical128BitLane( Avx.LoadVector256((SByte*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Avx2.ShiftRightLogical128BitLane( Avx.LoadAlignedVector256((SByte*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Avx2).GetMethod(nameof(Avx2.ShiftRightLogical128BitLane), new Type[] { typeof(Vector256<SByte>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<SByte>>(_dataTable.inArrayPtr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<SByte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Avx2).GetMethod(nameof(Avx2.ShiftRightLogical128BitLane), new Type[] { typeof(Vector256<SByte>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadVector256((SByte*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<SByte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Avx2).GetMethod(nameof(Avx2.ShiftRightLogical128BitLane), new Type[] { typeof(Vector256<SByte>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((SByte*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<SByte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = 
Avx2.ShiftRightLogical128BitLane( _clsVar, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector256<SByte>>(_dataTable.inArrayPtr); var result = Avx2.ShiftRightLogical128BitLane(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = Avx.LoadVector256((SByte*)(_dataTable.inArrayPtr)); var result = Avx2.ShiftRightLogical128BitLane(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var firstOp = Avx.LoadAlignedVector256((SByte*)(_dataTable.inArrayPtr)); var result = Avx2.ShiftRightLogical128BitLane(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftRightLogical128BitLaneSByte1(); var result = Avx2.ShiftRightLogical128BitLane(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Avx2.ShiftRightLogical128BitLane(_fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Avx2.ShiftRightLogical128BitLane(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector256<SByte> firstOp, void* result, [CallerMemberName] string method = "") { SByte[] inArray = new SByte[Op1ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<SByte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { SByte[] inArray = new SByte[Op1ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector256<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<SByte>>()); ValidateResult(inArray, outArray, method); } private void 
ValidateResult(SByte[] firstOp, SByte[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != 8) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if ((i == 31 || i == 15 ? result[i] != 0 : result[i] != 8)) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Avx2)}.{nameof(Avx2.ShiftRightLogical128BitLane)}<SByte>(Vector256<SByte><9>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.X86; namespace JIT.HardwareIntrinsics.X86 { public static partial class Program { private static void ShiftRightLogical128BitLaneSByte1() { var test = new ImmUnaryOpTest__ShiftRightLogical128BitLaneSByte1(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); // Validates basic functionality works, using LoadAligned test.RunBasicScenario_LoadAligned(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); // Validates calling via reflection works, using LoadAligned test.RunReflectionScenario_LoadAligned(); } // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (Avx.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); // Validates passing a local works, using LoadAligned test.RunLclVarScenario_LoadAligned(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ImmUnaryOpTest__ShiftRightLogical128BitLaneSByte1 { private struct TestStruct { public Vector256<SByte> _fld; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (sbyte)8; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref testStruct._fld), ref Unsafe.As<SByte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); return testStruct; } public void RunStructFldScenario(ImmUnaryOpTest__ShiftRightLogical128BitLaneSByte1 testClass) { var result = Avx2.ShiftRightLogical128BitLane(_fld, 1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 32; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector256<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector256<SByte>>() / sizeof(SByte); private static SByte[] _data = new SByte[Op1ElementCount]; private static Vector256<SByte> _clsVar; 
private Vector256<SByte> _fld; private SimpleUnaryOpTest__DataTable<SByte, SByte> _dataTable; static ImmUnaryOpTest__ShiftRightLogical128BitLaneSByte1() { for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (sbyte)8; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref _clsVar), ref Unsafe.As<SByte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); } public ImmUnaryOpTest__ShiftRightLogical128BitLaneSByte1() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (sbyte)8; } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector256<SByte>, byte>(ref _fld), ref Unsafe.As<SByte, byte>(ref _data[0]), (uint)Unsafe.SizeOf<Vector256<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data[i] = (sbyte)8; } _dataTable = new SimpleUnaryOpTest__DataTable<SByte, SByte>(_data, new SByte[RetElementCount], LargestVectorSize); } public bool IsSupported => Avx2.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Avx2.ShiftRightLogical128BitLane( Unsafe.Read<Vector256<SByte>>(_dataTable.inArrayPtr), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Avx2.ShiftRightLogical128BitLane( Avx.LoadVector256((SByte*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunBasicScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_LoadAligned)); var result = Avx2.ShiftRightLogical128BitLane( Avx.LoadAlignedVector256((SByte*)(_dataTable.inArrayPtr)), 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Avx2).GetMethod(nameof(Avx2.ShiftRightLogical128BitLane), new Type[] { typeof(Vector256<SByte>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector256<SByte>>(_dataTable.inArrayPtr), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<SByte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Avx2).GetMethod(nameof(Avx2.ShiftRightLogical128BitLane), new Type[] { typeof(Vector256<SByte>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadVector256((SByte*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<SByte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunReflectionScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_LoadAligned)); var result = typeof(Avx2).GetMethod(nameof(Avx2.ShiftRightLogical128BitLane), new Type[] { typeof(Vector256<SByte>), typeof(byte) }) .Invoke(null, new object[] { Avx.LoadAlignedVector256((SByte*)(_dataTable.inArrayPtr)), (byte)1 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector256<SByte>)(result)); ValidateResult(_dataTable.inArrayPtr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = 
Avx2.ShiftRightLogical128BitLane( _clsVar, 1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var firstOp = Unsafe.Read<Vector256<SByte>>(_dataTable.inArrayPtr); var result = Avx2.ShiftRightLogical128BitLane(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var firstOp = Avx.LoadVector256((SByte*)(_dataTable.inArrayPtr)); var result = Avx2.ShiftRightLogical128BitLane(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunLclVarScenario_LoadAligned() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_LoadAligned)); var firstOp = Avx.LoadAlignedVector256((SByte*)(_dataTable.inArrayPtr)); var result = Avx2.ShiftRightLogical128BitLane(firstOp, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(firstOp, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ImmUnaryOpTest__ShiftRightLogical128BitLaneSByte1(); var result = Avx2.ShiftRightLogical128BitLane(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Avx2.ShiftRightLogical128BitLane(_fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Avx2.ShiftRightLogical128BitLane(test._fld, 1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector256<SByte> firstOp, void* result, [CallerMemberName] string method = "") { SByte[] inArray = new SByte[Op1ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray[0]), firstOp); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<SByte>>()); ValidateResult(inArray, outArray, method); } private void ValidateResult(void* firstOp, void* result, [CallerMemberName] string method = "") { SByte[] inArray = new SByte[Op1ElementCount]; SByte[] outArray = new SByte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray[0]), ref Unsafe.AsRef<byte>(firstOp), (uint)Unsafe.SizeOf<Vector256<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector256<SByte>>()); ValidateResult(inArray, outArray, method); } private void 
ValidateResult(SByte[] firstOp, SByte[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != 8) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if ((i == 31 || i == 15 ? result[i] != 0 : result[i] != 8)) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Avx2)}.{nameof(Avx2.ShiftRightLogical128BitLane)}<SByte>(Vector256<SByte><9>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
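The validation above expects every element to remain 8 except elements 15 and 31, which must become 0. A minimal standalone sketch (not part of the generated test suite, and assuming AVX2-capable hardware) of why that is: ShiftRightLogical128BitLane shifts each 128-bit lane right by the given number of bytes and zero-fills the vacated high bytes, so the top byte of each lane drops to 0.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ShiftRightLogical128BitLaneSketch
{
    static void Main()
    {
        if (!Avx2.IsSupported)
        {
            Console.WriteLine("AVX2 not supported; skipping.");
            return;
        }

        // Broadcast 8 into all 32 sbyte elements, matching the test's input data.
        Vector256<sbyte> input = Vector256.Create((sbyte)8);

        // Shift each 128-bit lane right by 1 byte; the high byte of each lane is zero-filled.
        Vector256<sbyte> result = Avx2.ShiftRightLogical128BitLane(input, 1);

        for (int i = 0; i < Vector256<sbyte>.Count; i++)
        {
            // Prints 8 for i = 0..14 and 16..30, and 0 for i = 15 and i = 31.
            Console.WriteLine($"result[{i}] = {result.GetElement(i)}");
        }
    }
}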
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
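As a hedged illustration only (the placement below is conceptual, not the JIT's actual instrumentation), the difference between the two strategies can be pictured on an ordinary C# `for` loop: the backedge target is the loop head where the condition is re-tested, and the backedge source is the bottom of the loop body that jumps back to it.

using System;

public static class PatchpointPlacementSketch
{
    public static void Main()
    {
        Console.WriteLine(SumToN(10));   // prints 45
    }

    static long SumToN(int n)
    {
        long sum = 0;

        // Backedge target: control returns to the loop head each iteration to re-test i < n.
        // A target-placed patchpoint would conceptually sit here.
        for (int i = 0; i < n; i++)
        {
            sum += i;
            // Backedge source: the bottom of the loop body, just before jumping back to the test.
            // A source-placed patchpoint would conceptually sit here instead.
        }

        return sum;
    }
}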
./src/tests/JIT/Generics/Instantiation/delegates/Delegate021.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="Delegate021.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <PropertyGroup> <DebugType>PdbOnly</DebugType> </PropertyGroup> <ItemGroup> <Compile Include="Delegate021.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
./src/tests/tracing/common/Assert.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace Tracing.Tests.Common { public static class Assert { public static void True(string name, bool condition) { if (!condition) { throw new Exception( string.Format("Condition '{0}' is not true", name)); } } public static void Equal<T>(string name, T left, T right) where T : IEquatable<T> { if (left == null && right != null) { throw new Exception( string.Format("Values for '{0}' are not equal! Left=NULL Right='{1}'", name, right)); } else if (left != null && right == null) { throw new Exception( string.Format("Values for '{0}' are not equal! Left='{1}' Right=NULL", name, left)); } else if (!left.Equals(right)) { throw new Exception( string.Format("Values for '{0}' are not equal! Left='{1}' Right='{2}'", name, left, right)); } } public static void NotEqual<T>(string name, T left, T right) where T : IEquatable<T> { if (left == null && right == null) { throw new Exception( string.Format("Values for '{0}' are equal! Left=NULL Right=NULL", name)); } else if (left != null && left.Equals(right)) { throw new Exception( string.Format("Values for '{0}' are equal! Left='{1}' Right='{2}'", name, left, right)); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace Tracing.Tests.Common { public static class Assert { public static void True(string name, bool condition) { if (!condition) { throw new Exception( string.Format("Condition '{0}' is not true", name)); } } public static void Equal<T>(string name, T left, T right) where T : IEquatable<T> { if (left == null && right != null) { throw new Exception( string.Format("Values for '{0}' are not equal! Left=NULL Right='{1}'", name, right)); } else if (left != null && right == null) { throw new Exception( string.Format("Values for '{0}' are not equal! Left='{1}' Right=NULL", name, left)); } else if (!left.Equals(right)) { throw new Exception( string.Format("Values for '{0}' are not equal! Left='{1}' Right='{2}'", name, left, right)); } } public static void NotEqual<T>(string name, T left, T right) where T : IEquatable<T> { if (left == null && right == null) { throw new Exception( string.Format("Values for '{0}' are equal! Left=NULL Right=NULL", name)); } else if (left != null && left.Equals(right)) { throw new Exception( string.Format("Values for '{0}' are equal! Left='{1}' Right='{2}'", name, left, right)); } } } }
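A short usage sketch for the helper above (the scenario names and values are illustrative, not taken from any specific tracing test): each method throws a plain Exception with a descriptive message when the check fails.

using Tracing.Tests.Common;

public static class AssertUsageSketch
{
    public static void Main()
    {
        // Passes: the condition is true.
        Assert.True("sessionStarted", true);

        // Passes: both sides are equal; on mismatch it would throw, naming "eventCount".
        Assert.Equal("eventCount", 4, 4);

        // Passes: the values differ; equal values would throw, naming "processId".
        Assert.NotEqual("processId", 0, 1234);
    }
}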
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptive, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
./src/libraries/System.Dynamic.Runtime/tests/Dynamic.Context/Conformance.dynamic.context.property.regproperty.regclass.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclassregprop.regclassregprop; using Xunit; namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclassregprop.regclassregprop { public class MyClass { public int Field = 0; } public struct MyStruct { public int Number; } public enum MyEnum { First = 1, Second = 2, Third = 3 } public class MemberClass { /* Example of calling it: MemberClass staticMC =new MemberClass(); dynamic mc = staticMC; bool myBool; //This test the getter for the property myBool = true; staticMC.myBool = myBool; //We set the inner field myBool = mc.Property_bool; //We use the property to get the field if (myBool != true) return 1; //This tests the setter for the property myBool = true; mc.Property_bool = myBool; //We set the property myBool = statMc.myBool; // We get the inner field if (myBool != true) return 1; */ public bool myBool = true; public bool? myBoolNull = true; public bool?[] myBoolNullArr = new bool?[2]; public bool[] myBoolArr = new bool[2]; public char myChar = 'a'; public char? myCharNull = 'a'; public char?[] myCharNullArr = new char?[2]; public char[] myCharArr = new char[2]; public decimal myDecimal = 1m; public decimal? myDecimalNull = 1m; public decimal?[] myDecimalNullArr = new decimal?[2]; public decimal[] myDecimalArr = new decimal[2]; public dynamic myDynamic = new object(); public float myFloat = 3f; public float?[] myFloatNullArr = new float?[] { } ; public MyClass myClass = new MyClass() { Field = 2 } ; public MyClass[] myClassArr = new MyClass[3]; public MyEnum myEnum = MyEnum.First; public MyEnum? myEnumNull = MyEnum.First; public MyEnum?[] myEnumNullArr = new MyEnum?[3]; public MyEnum[] myEnumArr = new MyEnum[3]; public MyStruct myStruct = new MyStruct() { Number = 3 } ; public MyStruct? myStructNull = new MyStruct() { Number = 3 } ; public MyStruct?[] myStructNullArr = new MyStruct?[3]; public MyStruct[] myStructArr = new MyStruct[3]; public short myShort = 1; public short? myShortNull = 1; public short?[] myShortNullArr = new short?[2]; public short[] myShortArr = new short[2]; public string myString = string.Empty; public string[] myStringArr = new string[2]; public ulong myUlong = 1; public ulong? myUlongNull = 1; public ulong?[] myUlongNullArr = new ulong?[2]; public ulong[] myUlongArr = new ulong[2]; public bool Property_bool { protected set { myBool = value; } get { return false; } } public bool? Property_boolNull { protected set { myBoolNull = value; } get { return null; } } public bool?[] Property_boolNullArr { protected set { myBoolNullArr = value; } get { return new bool?[] { true, null, false } ; } } public bool[] Property_boolArr { protected set { myBoolArr = value; } get { return new bool[] { true, false } ; } } public char Property_char { private set { myChar = value; } get { return myChar; } } public char? Property_charNull { private set { myCharNull = value; } get { return myCharNull; } } public char?[] Property_charNullArr { private set { myCharNullArr = value; } get { return myCharNullArr; } } public char[] Property_charArr { private set { myCharArr = value; } get { return myCharArr; } } public decimal Property_decimal { internal set { myDecimal = value; } get { return myDecimal; } } public decimal? 
Property_decimalNull { internal set { myDecimalNull = value; } get { return myDecimalNull; } } public decimal?[] Property_decimalNullArr { protected internal set { myDecimalNullArr = value; } get { return myDecimalNullArr; } } public decimal[] Property_decimalArr { protected internal set { myDecimalArr = value; } get { return myDecimalArr; } } public dynamic Property_dynamic { get { return myDynamic; } set { myDynamic = value; } } public float Property_Float { get { return myFloat; } set { myFloat = value; } } public float?[] Property_FloatNullArr { get { return myFloatNullArr; } set { myFloatNullArr = value; } } public MyClass Property_MyClass { set { myClass = value; } } public MyClass[] Property_MyClassArr { set { myClassArr = value; } } public MyEnum Property_MyEnum { set { myEnum = value; } get { return myEnum; } } public MyEnum? Property_MyEnumNull { set { myEnumNull = value; } private get { return myEnumNull; } } public MyEnum?[] Property_MyEnumNullArr { set { myEnumNullArr = value; } private get { return myEnumNullArr; } } public MyEnum[] Property_MyEnumArr { set { myEnumArr = value; } private get { return myEnumArr; } } public MyStruct Property_MyStruct { get { return myStruct; } set { myStruct = value; } } public MyStruct? Property_MyStructNull { get { return myStructNull; } } public MyStruct?[] Property_MyStructNullArr { get { return myStructNullArr; } } public MyStruct[] Property_MyStructArr { get { return myStructArr; } } public short Property_short { set { myShort = value; } protected get { return myShort; } } public short? Property_shortNull { set { myShortNull = value; } protected get { return myShortNull; } } public short?[] Property_shortNullArr { set { myShortNullArr = value; } protected get { return myShortNullArr; } } public short[] Property_shortArr { set { myShortArr = value; } protected get { return myShortArr; } } public string Property_string { set { myString = value; } get { return myString; } } public string[] Property_stringArr { set { myStringArr = value; } get { return myStringArr; } } public ulong Property_ulong { set { myUlong = value; } protected internal get { return myUlong; } } public ulong? Property_ulongNull { set { myUlongNull = value; } protected internal get { return myUlongNull; } } public ulong?[] Property_ulongNullArr { set { myUlongNullArr = value; } protected internal get { return myUlongNullArr; } } public ulong[] Property_ulongArr { set { myUlongArr = value; } protected internal get { return myUlongArr; } } public static bool myBoolStatic; public static MyClass myClassStatic = new MyClass(); public static bool Property_boolStatic { protected set { myBoolStatic = value; } get { return myBoolStatic; } } } } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass001.regclass001 { // <Title> Tests regular class regular property used in static method body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { return Test.TestGetMethod(new MemberClass()) + Test.TestSetMethod(new MemberClass()) == 0 ? 
0 : 1; } public static int TestGetMethod(MemberClass mc) { dynamic dy = mc; dy.myBool = true; if (dy.Property_bool) //always return false return 1; else return 0; } public static int TestSetMethod(MemberClass mc) { dynamic dy = mc; try { dy.Property_bool = true; } catch (Microsoft.CSharp.RuntimeBinder.RuntimeBinderException e) { if (ErrorVerifier.Verify(ErrorMessageId.InaccessibleSetter, e.Message, "MemberClass.Property_bool", "set")) return 0; } return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass002.regclass002 { // <Title> Tests regular class regular property used in arguments of method invocation.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { private delegate int TestDec(char[] c); private static char[] s_charArray = new char[] { '0', 'a' }; [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test t = new Test(); TestDec td = t.TestMethod; MemberClass mc = new MemberClass(); mc.myCharArr = s_charArray; dynamic dy = mc; return td((char[])dy.Property_charArr); } public int TestMethod(char[] c) { if (ReferenceEquals(c, s_charArray) && c[0] == '0' && c[1] == 'a') return 0; else return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass003.regclass003 { // <Title> Tests regular class regular property used in property-set body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { private static dynamic s_mc = new MemberClass(); [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Dec = new short?[] { null, 0, -1 } ; if (s_mc.myShortNullArr[0] == null && s_mc.myShortNullArr[1] == 0 && s_mc.myShortNullArr[2] == -1) return 0; return 1; } public static short?[] Dec { set { s_mc.Property_shortNullArr = value; } get { return null; } } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass004.regclass004 { // <Title> Tests regular class regular property used in short-circuit boolean expression and ternary operator expression.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { dynamic dy = new MemberClass(); int loopCount = 0; dy.Property_decimal = 0M; while (dy.Property_decimal < 10) { System.Console.WriteLine((object)dy.Property_decimal); dy.Property_decimal++; loopCount++; } return (dy.Property_decimal == 10 && loopCount == 10) ? 
0 : 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass005.regclass005 { // <Title> Tests regular class regular property used in property set.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test t = new Test(); try { t.TestProperty = null; //protected, should have exception return 1; } catch (Microsoft.CSharp.RuntimeBinder.RuntimeBinderException e) { if (ErrorVerifier.Verify(ErrorMessageId.InaccessibleSetter, e.Message, "MemberClass.Property_boolNull")) return 0; } return 1; } public bool? TestProperty { set { dynamic dy = new MemberClass(); dy.Property_boolNull = value; } } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass006.regclass006 { // <Title> Tests regular class regular property used in property get body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test t = new Test(); bool?[] array = t.TestProperty; if (array.Length == 3 && array[0] == true && array[1] == null && array[2] == false) return 0; return 1; } public bool?[] TestProperty { get { dynamic dy = new MemberClass(); return dy.Property_boolNullArr; } } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass007.regclass007 { // <Title> Tests regular class regular property used in static method body.</Title> // <Description> // Derived class call protected parent property. 
// </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test : MemberClass { public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test mc = new Test(); dynamic dy = mc; dy.Property_boolArr = new bool[3]; bool[] result = dy.Property_boolArr; if (result.Length != 2 || result[0] != true || result[1] != false) return 1; return 0; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass008.regclass008 { // <Title> Tests regular class regular property used in indexer.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System.Collections.Generic; public class Test { private Dictionary<string, int> _dic = new Dictionary<string, int>(); private MemberClass _mc = new MemberClass(); [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { if (TestSet() == 0 && TestGet() == 0) return 0; else return 1; } private static int TestSet() { Test t = new Test(); t["a"] = 10; t[string.Empty] = -1; if (t._dic["a"] == 10 && t._dic[string.Empty] == -1 && (string)t._mc.Property_string == string.Empty) return 0; else return 1; } private static int TestGet() { Test t = new Test(); t._dic["Test0"] = 2; if (t["Test0"] == 2) return 0; else return 1; } public int this[string i] { set { dynamic dy = _mc; dy.Property_string = i; _dic.Add((string)dy.Property_string, value); _mc = dy; //this is to circumvent the boxing of the struct } get { _mc.Property_string = i; dynamic dy = _mc; return _dic[(string)dy.Property_string]; } } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass010.regclass010 { // <Title> Tests regular class regular property used in try/catch/finally.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; dy.myChar = 'a'; try { dy.Property_char = 'x'; //private, should have exception. return 1; } catch (Microsoft.CSharp.RuntimeBinder.RuntimeBinderException e) { if (!ErrorVerifier.Verify(ErrorMessageId.InaccessibleSetter, e.Message, "MemberClass.Property_char")) return 1; if ((char)dy.Property_char != 'a') return 1; } finally { dy.myChar = 'b'; } if ((char)dy.Property_char != 'b') return 1; return 0; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass011.regclass011 { // <Title> Tests regular class regular property used in anonymous method.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System; public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; Func<char, char?> func = delegate (char arg) { mc.myCharNull = arg; dy = mc; // struct need to re-assign the value. return dy.Property_charNull; } ; char? 
result = func('a'); if (result == 'a') return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass012.regclass012 { // <Title> Tests regular class regular property used in lambda expression.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System.Linq; public class Test { public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { return TestGet() + TestSet(); } private static int TestSet() { MemberClass mc = new MemberClass(); dynamic dy = mc; dy.Property_ulongArr = new ulong[] { 1, 2, 3, 4, 3, 4 } ; dy.Property_ulong = (ulong)4; var list = mc.Property_ulongArr.Where(p => p == (ulong)mc.Property_ulong).ToList(); return list.Count - 2; } private static int TestGet() { MemberClass mc = new MemberClass(); dynamic dy = mc; mc.Property_ulongArr = new ulong[] { 1, 2, 3, 4, 3, 4 } ; mc.Property_ulong = 4; var list = ((ulong[])dy.Property_ulongArr).Where(p => p == (ulong)dy.Property_ulong).ToList(); return list.Count - 2; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass013.regclass013 { // <Title> Tests regular class regular property used in the foreach loop body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; char result = default(char); char[] LoopArray = new char[] { 'a', 'b' } ; foreach (char c in LoopArray) { mc.myCharNullArr = new char?[] { c } ; dy = mc; result = ((char?[])dy.myCharNullArr)[0].Value; } if (result == 'b') return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass014.regclass014 { // <Title> Tests regular class regular property used in do/while expression.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.Property_decimalNull = -10.1M; dynamic dy = mc; do { mc.Property_decimalNull += 1; dy = mc; // for struct we should re-assign. 
} while ((decimal?)dy.Property_decimalNull < 0M); if ((decimal?)mc.Property_decimalNull == 0.9M) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass017.regclass017 { // <Title> Tests regular class regular property used in using expression.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System.IO; public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.myChar = (char)256; dynamic dy = mc; using (MemoryStream ms = new MemoryStream((int)dy.Property_char)) { if (ms.Capacity != 256) return 1; } return 0; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass018.regclass018 { // <Title> Tests regular class regular property used in try/catch/finally.</Title> // <Description> // try/catch/finally that uses an anonymous method and refer two dynamic parameters. // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System; public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.Property_decimalNullArr = new decimal?[] { 0M, 1M, 1.3M } ; mc.myStruct = new MyStruct() { Number = 3 } ; dynamic dy = mc; int result = -1; try { Func<decimal?[], MyStruct, int> func = delegate (decimal?[] x, MyStruct y) { int tmp = 0; foreach (decimal? d in x) { tmp += (int)d.Value; } tmp += y.Number; return tmp; } ; result = func((decimal?[])dy.Property_decimalNullArr, (MyStruct)dy.Property_MyStruct); } finally { result += (int)dy.Property_MyStruct.Number; } if (result != 8) return 1; return 0; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass019.regclass019 { // <Title> Tests regular class regular property used in foreach.</Title> // <Description> // foreach inside a using statement that uses the dynamic introduced by the using statement. // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System.IO; public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { int[] array = new int[] { 1, 2 } ; MemoryStream ms = new MemoryStream(new byte[] { 84, 101, 115, 116 } ); //Test MemberClass mc = new MemberClass(); mc.Property_dynamic = true; dynamic dy = mc; string result = string.Empty; using (dynamic sr = new StreamReader(ms, (bool)dy.Property_dynamic)) { foreach (int s in array) { ms.Position = 0; string m = ((StreamReader)sr).ReadToEnd(); result += m + s.ToString(); } } //Test1Test2 if (result == "Test1Test2") return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass020.regclass020 { // <Title> Tests regular class regular property used in iterator that calls to a lambda expression.</Title> // <Description> // foreach inside a using statement that uses the dynamic introduced by the using statement. 
// </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System; using System.Collections; public class Test { private static MemberClass s_mc; private static dynamic s_dy; static Test() { s_mc = new MemberClass(); } [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { decimal index = 1M; s_mc.Property_decimalArr = new decimal[] { 1M, 2M, 3M } ; s_dy = s_mc; Test t = new Test(); foreach (decimal i in t.Increment(0)) { if (i != index) return 1; index = index + 1; } if (index != 4) return 1; return 0; } public IEnumerable Increment(int number) { while (number < s_mc.Property_decimalArr.Length) { Func<decimal[], decimal> func = (decimal[] x) => x[number++]; yield return func(s_dy.Property_decimalArr); } } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass021.regclass021 { // <Title> Tests regular class regular property used in object initializer inside a collection initializer.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System.Collections.Generic; public class Test { private float _field1; private float?[] _field2; [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.Property_Float = 1.23f; mc.Property_FloatNullArr = new float?[] { null, 1.33f } ; dynamic dy = mc; List<Test> list = new List<Test>() { new Test() { _field1 = dy.Property_Float, _field2 = dy.Property_FloatNullArr } } ; if (list.Count == 1 && list[0]._field1 == 1.23f && list[0]._field2.Length == 2 && list[0]._field2[0] == null && list[0]._field2[1] == 1.33f) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass022.regclass022 { // <Title> Tests regular class regular property used in static method body.</Title> // <Description> // set only property access. 
// </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; dy.Property_MyClass = new MyClass() { Field = -1 } ; mc = dy; //to circumvent the boxing of the struct if (mc.myClass.Field == -1) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass023.regclass023 { // <Title> Tests regular class regular property used in static method body.</Title> // <Description> // Negative: set only property access // </Description> // <RelatedBugs></RelatedBugs> // <Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; dy.Property_MyClass = new MyClass() { Field = -1 } ; mc = dy; //to circumvent the boxing of the struct if (mc.myClass.Field != -1) return 1; return 0; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass024.regclass024 { // <Title> Tests regular class regular property used in throws.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System; public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; dy.Property_string = "Test Message"; try { throw new ArithmeticException((string)dy.Property_string); } catch (ArithmeticException ae) { if (ae.Message == "Test Message") return 0; } return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass025.regclass025 { // <Title> Tests regular class regular property used in field initializer.</Title> // <Description> // Negative // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { private static MemberClass s_mc; private static dynamic s_dy; private MyEnum _me = s_dy.Property_MyEnum; static Test() { s_mc = new MemberClass(); s_dy = s_mc; s_dy.Property_MyEnum = MyEnum.Third; } [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test t = new Test(); if (t._me != MyEnum.Third) return 1; return 0; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass026.regclass026 { // <Title> Tests regular class regular property used in set only property body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { private MemberClass _mc; private MyEnum? MyProp { set { _mc = new MemberClass(); dynamic dy = _mc; dy.Property_MyEnumNull = value; _mc = dy; // for struct. 
} } [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test t = new Test(); t.MyProp = MyEnum.Second; if (t._mc.myEnumNull == MyEnum.Second) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass027.regclass027 { // <Title> Tests regular class regular property used in read only property body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { private MyEnum MyProp { get { dynamic dy = new MemberClass(); dy.Property_MyEnumArr = new MyEnum[] { MyEnum.Second, default (MyEnum)} ; return dy.myEnumArr[0]; } } [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test t = new Test(); if (t.MyProp == MyEnum.Second) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass028.regclass028 { // <Title> Tests regular class regular property used in static read only property body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { private static MyEnum? MyProp { get { dynamic dy = new MemberClass(); dy.Property_MyEnumNullArr = new MyEnum?[] { null, MyEnum.Second, default (MyEnum)} ; return dy.myEnumNullArr[0]; } } [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { if (Test.MyProp == null) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass029.regclass029 { // <Title> Tests regular class regular property used in static method body.</Title> // <Description> // get only property access. // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.myStructNull = new MyStruct() { Number = int.MinValue } ; dynamic dy = mc; MyStruct? 
result = dy.Property_MyStructNull; if (result.Value.Number == int.MinValue) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass030.regclass030 { // <Title> Tests regular class regular property used in method call argument.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.myStructArr = new MyStruct[] { new MyStruct() { Number = 1 } , new MyStruct() { Number = -1 } } ; dynamic dy = mc; bool result = TestMethod(1, string.Empty, (MyStruct[])dy.Property_MyStructArr); if (result) return 0; return 1; } private static bool TestMethod<V, U>(V v, U u, params MyStruct[] ms) { if (v.GetType() != typeof(int)) return false; if (u.GetType() != typeof(string)) return false; if (ms.Length != 2 || ms[0].Number != 1 || ms[1].Number != -1) return false; return true; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass031.regclass031 { // <Title> Tests regular class regular property used in static method body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; mc.myStructNullArr = new MyStruct?[] { null, new MyStruct() { Number = -1 } } ; if (((MyStruct?[])dy.Property_MyStructNullArr)[0] == null) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass032.regclass032 { // <Title> Tests regular class regular property used in lock expression.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test : MemberClass { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; dy.Property_shortNull = (short)-1; try { lock (dy.Property_shortNull) { } } catch (Microsoft.CSharp.RuntimeBinder.RuntimeBinderException e) { if (ErrorVerifier.Verify(ErrorMessageId.BadProtectedAccess, e.Message, "MemberClass.Property_shortNull", "MemberClass", "Test")) return 0; } return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass034.regclass034 { // <Title> Tests regular class regular property used in foreach loop.</Title> // <Description> // Negative // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.Property_shortArr = new short[] { 1, 2, 3, 4, 5 } ; dynamic dy = mc; short i = 1; try { foreach (var x in dy.Property_shortArr) //protected { if (i++ != (short)x) return 1; } } catch (Microsoft.CSharp.RuntimeBinder.RuntimeBinderException e) { if (ErrorVerifier.Verify(ErrorMessageId.InaccessibleGetter, e.Message, "MemberClass.Property_shortArr")) return 0; } return 1; } } //</Code> } namespace 
ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass035.regclass035 { // <Title> Tests regular class regular property used in method body.</Title> // <Description> // Negative // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { return Test.TestGetMethod(new MemberClass()) + Test.TestSetMethod(new MemberClass()) == 0 ? 0 : 1; } public static int TestGetMethod(MemberClass mc) { dynamic dy = mc; mc.Property_ulongNullArr = new ulong?[] { null, 1 } ; if (dy.Property_ulongNullArr.Length == 2 && dy.Property_ulongNullArr[0] == null && dy.Property_ulongNullArr[1] == 1) return 0; else return 1; } public static int TestSetMethod(MemberClass mc) { dynamic dy = mc; dy.Property_ulongNullArr = new ulong?[] { null, 1 } ; if (mc.Property_ulongNullArr.Length == 2 && mc.Property_ulongNullArr[0] == null && mc.Property_ulongNullArr[1] == 1) return 0; else return 1; } } //</Code> }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclassregprop.regclassregprop; using Xunit; namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclassregprop.regclassregprop { public class MyClass { public int Field = 0; } public struct MyStruct { public int Number; } public enum MyEnum { First = 1, Second = 2, Third = 3 } public class MemberClass { /* Example of calling it: MemberClass staticMC =new MemberClass(); dynamic mc = staticMC; bool myBool; //This test the getter for the property myBool = true; staticMC.myBool = myBool; //We set the inner field myBool = mc.Property_bool; //We use the property to get the field if (myBool != true) return 1; //This tests the setter for the property myBool = true; mc.Property_bool = myBool; //We set the property myBool = statMc.myBool; // We get the inner field if (myBool != true) return 1; */ public bool myBool = true; public bool? myBoolNull = true; public bool?[] myBoolNullArr = new bool?[2]; public bool[] myBoolArr = new bool[2]; public char myChar = 'a'; public char? myCharNull = 'a'; public char?[] myCharNullArr = new char?[2]; public char[] myCharArr = new char[2]; public decimal myDecimal = 1m; public decimal? myDecimalNull = 1m; public decimal?[] myDecimalNullArr = new decimal?[2]; public decimal[] myDecimalArr = new decimal[2]; public dynamic myDynamic = new object(); public float myFloat = 3f; public float?[] myFloatNullArr = new float?[] { } ; public MyClass myClass = new MyClass() { Field = 2 } ; public MyClass[] myClassArr = new MyClass[3]; public MyEnum myEnum = MyEnum.First; public MyEnum? myEnumNull = MyEnum.First; public MyEnum?[] myEnumNullArr = new MyEnum?[3]; public MyEnum[] myEnumArr = new MyEnum[3]; public MyStruct myStruct = new MyStruct() { Number = 3 } ; public MyStruct? myStructNull = new MyStruct() { Number = 3 } ; public MyStruct?[] myStructNullArr = new MyStruct?[3]; public MyStruct[] myStructArr = new MyStruct[3]; public short myShort = 1; public short? myShortNull = 1; public short?[] myShortNullArr = new short?[2]; public short[] myShortArr = new short[2]; public string myString = string.Empty; public string[] myStringArr = new string[2]; public ulong myUlong = 1; public ulong? myUlongNull = 1; public ulong?[] myUlongNullArr = new ulong?[2]; public ulong[] myUlongArr = new ulong[2]; public bool Property_bool { protected set { myBool = value; } get { return false; } } public bool? Property_boolNull { protected set { myBoolNull = value; } get { return null; } } public bool?[] Property_boolNullArr { protected set { myBoolNullArr = value; } get { return new bool?[] { true, null, false } ; } } public bool[] Property_boolArr { protected set { myBoolArr = value; } get { return new bool[] { true, false } ; } } public char Property_char { private set { myChar = value; } get { return myChar; } } public char? Property_charNull { private set { myCharNull = value; } get { return myCharNull; } } public char?[] Property_charNullArr { private set { myCharNullArr = value; } get { return myCharNullArr; } } public char[] Property_charArr { private set { myCharArr = value; } get { return myCharArr; } } public decimal Property_decimal { internal set { myDecimal = value; } get { return myDecimal; } } public decimal? 
Property_decimalNull { internal set { myDecimalNull = value; } get { return myDecimalNull; } } public decimal?[] Property_decimalNullArr { protected internal set { myDecimalNullArr = value; } get { return myDecimalNullArr; } } public decimal[] Property_decimalArr { protected internal set { myDecimalArr = value; } get { return myDecimalArr; } } public dynamic Property_dynamic { get { return myDynamic; } set { myDynamic = value; } } public float Property_Float { get { return myFloat; } set { myFloat = value; } } public float?[] Property_FloatNullArr { get { return myFloatNullArr; } set { myFloatNullArr = value; } } public MyClass Property_MyClass { set { myClass = value; } } public MyClass[] Property_MyClassArr { set { myClassArr = value; } } public MyEnum Property_MyEnum { set { myEnum = value; } get { return myEnum; } } public MyEnum? Property_MyEnumNull { set { myEnumNull = value; } private get { return myEnumNull; } } public MyEnum?[] Property_MyEnumNullArr { set { myEnumNullArr = value; } private get { return myEnumNullArr; } } public MyEnum[] Property_MyEnumArr { set { myEnumArr = value; } private get { return myEnumArr; } } public MyStruct Property_MyStruct { get { return myStruct; } set { myStruct = value; } } public MyStruct? Property_MyStructNull { get { return myStructNull; } } public MyStruct?[] Property_MyStructNullArr { get { return myStructNullArr; } } public MyStruct[] Property_MyStructArr { get { return myStructArr; } } public short Property_short { set { myShort = value; } protected get { return myShort; } } public short? Property_shortNull { set { myShortNull = value; } protected get { return myShortNull; } } public short?[] Property_shortNullArr { set { myShortNullArr = value; } protected get { return myShortNullArr; } } public short[] Property_shortArr { set { myShortArr = value; } protected get { return myShortArr; } } public string Property_string { set { myString = value; } get { return myString; } } public string[] Property_stringArr { set { myStringArr = value; } get { return myStringArr; } } public ulong Property_ulong { set { myUlong = value; } protected internal get { return myUlong; } } public ulong? Property_ulongNull { set { myUlongNull = value; } protected internal get { return myUlongNull; } } public ulong?[] Property_ulongNullArr { set { myUlongNullArr = value; } protected internal get { return myUlongNullArr; } } public ulong[] Property_ulongArr { set { myUlongArr = value; } protected internal get { return myUlongArr; } } public static bool myBoolStatic; public static MyClass myClassStatic = new MyClass(); public static bool Property_boolStatic { protected set { myBoolStatic = value; } get { return myBoolStatic; } } } } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass001.regclass001 { // <Title> Tests regular class regular property used in static method body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { return Test.TestGetMethod(new MemberClass()) + Test.TestSetMethod(new MemberClass()) == 0 ? 
0 : 1; } public static int TestGetMethod(MemberClass mc) { dynamic dy = mc; dy.myBool = true; if (dy.Property_bool) //always return false return 1; else return 0; } public static int TestSetMethod(MemberClass mc) { dynamic dy = mc; try { dy.Property_bool = true; } catch (Microsoft.CSharp.RuntimeBinder.RuntimeBinderException e) { if (ErrorVerifier.Verify(ErrorMessageId.InaccessibleSetter, e.Message, "MemberClass.Property_bool", "set")) return 0; } return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass002.regclass002 { // <Title> Tests regular class regular property used in arguments of method invocation.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { private delegate int TestDec(char[] c); private static char[] s_charArray = new char[] { '0', 'a' }; [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test t = new Test(); TestDec td = t.TestMethod; MemberClass mc = new MemberClass(); mc.myCharArr = s_charArray; dynamic dy = mc; return td((char[])dy.Property_charArr); } public int TestMethod(char[] c) { if (ReferenceEquals(c, s_charArray) && c[0] == '0' && c[1] == 'a') return 0; else return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass003.regclass003 { // <Title> Tests regular class regular property used in property-set body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { private static dynamic s_mc = new MemberClass(); [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Dec = new short?[] { null, 0, -1 } ; if (s_mc.myShortNullArr[0] == null && s_mc.myShortNullArr[1] == 0 && s_mc.myShortNullArr[2] == -1) return 0; return 1; } public static short?[] Dec { set { s_mc.Property_shortNullArr = value; } get { return null; } } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass004.regclass004 { // <Title> Tests regular class regular property used in short-circuit boolean expression and ternary operator expression.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { dynamic dy = new MemberClass(); int loopCount = 0; dy.Property_decimal = 0M; while (dy.Property_decimal < 10) { System.Console.WriteLine((object)dy.Property_decimal); dy.Property_decimal++; loopCount++; } return (dy.Property_decimal == 10 && loopCount == 10) ? 
0 : 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass005.regclass005 { // <Title> Tests regular class regular property used in property set.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test t = new Test(); try { t.TestProperty = null; //protected, should have exception return 1; } catch (Microsoft.CSharp.RuntimeBinder.RuntimeBinderException e) { if (ErrorVerifier.Verify(ErrorMessageId.InaccessibleSetter, e.Message, "MemberClass.Property_boolNull")) return 0; } return 1; } public bool? TestProperty { set { dynamic dy = new MemberClass(); dy.Property_boolNull = value; } } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass006.regclass006 { // <Title> Tests regular class regular property used in property get body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test t = new Test(); bool?[] array = t.TestProperty; if (array.Length == 3 && array[0] == true && array[1] == null && array[2] == false) return 0; return 1; } public bool?[] TestProperty { get { dynamic dy = new MemberClass(); return dy.Property_boolNullArr; } } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass007.regclass007 { // <Title> Tests regular class regular property used in static method body.</Title> // <Description> // Derived class call protected parent property. 
// </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test : MemberClass { public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test mc = new Test(); dynamic dy = mc; dy.Property_boolArr = new bool[3]; bool[] result = dy.Property_boolArr; if (result.Length != 2 || result[0] != true || result[1] != false) return 1; return 0; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass008.regclass008 { // <Title> Tests regular class regular property used in indexer.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System.Collections.Generic; public class Test { private Dictionary<string, int> _dic = new Dictionary<string, int>(); private MemberClass _mc = new MemberClass(); [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { if (TestSet() == 0 && TestGet() == 0) return 0; else return 1; } private static int TestSet() { Test t = new Test(); t["a"] = 10; t[string.Empty] = -1; if (t._dic["a"] == 10 && t._dic[string.Empty] == -1 && (string)t._mc.Property_string == string.Empty) return 0; else return 1; } private static int TestGet() { Test t = new Test(); t._dic["Test0"] = 2; if (t["Test0"] == 2) return 0; else return 1; } public int this[string i] { set { dynamic dy = _mc; dy.Property_string = i; _dic.Add((string)dy.Property_string, value); _mc = dy; //this is to circumvent the boxing of the struct } get { _mc.Property_string = i; dynamic dy = _mc; return _dic[(string)dy.Property_string]; } } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass010.regclass010 { // <Title> Tests regular class regular property used in try/catch/finally.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; dy.myChar = 'a'; try { dy.Property_char = 'x'; //private, should have exception. return 1; } catch (Microsoft.CSharp.RuntimeBinder.RuntimeBinderException e) { if (!ErrorVerifier.Verify(ErrorMessageId.InaccessibleSetter, e.Message, "MemberClass.Property_char")) return 1; if ((char)dy.Property_char != 'a') return 1; } finally { dy.myChar = 'b'; } if ((char)dy.Property_char != 'b') return 1; return 0; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass011.regclass011 { // <Title> Tests regular class regular property used in anonymous method.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System; public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; Func<char, char?> func = delegate (char arg) { mc.myCharNull = arg; dy = mc; // struct need to re-assign the value. return dy.Property_charNull; } ; char? 
result = func('a'); if (result == 'a') return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass012.regclass012 { // <Title> Tests regular class regular property used in lambda expression.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System.Linq; public class Test { public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { return TestGet() + TestSet(); } private static int TestSet() { MemberClass mc = new MemberClass(); dynamic dy = mc; dy.Property_ulongArr = new ulong[] { 1, 2, 3, 4, 3, 4 } ; dy.Property_ulong = (ulong)4; var list = mc.Property_ulongArr.Where(p => p == (ulong)mc.Property_ulong).ToList(); return list.Count - 2; } private static int TestGet() { MemberClass mc = new MemberClass(); dynamic dy = mc; mc.Property_ulongArr = new ulong[] { 1, 2, 3, 4, 3, 4 } ; mc.Property_ulong = 4; var list = ((ulong[])dy.Property_ulongArr).Where(p => p == (ulong)dy.Property_ulong).ToList(); return list.Count - 2; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass013.regclass013 { // <Title> Tests regular class regular property used in the foreach loop body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; char result = default(char); char[] LoopArray = new char[] { 'a', 'b' } ; foreach (char c in LoopArray) { mc.myCharNullArr = new char?[] { c } ; dy = mc; result = ((char?[])dy.myCharNullArr)[0].Value; } if (result == 'b') return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass014.regclass014 { // <Title> Tests regular class regular property used in do/while expression.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.Property_decimalNull = -10.1M; dynamic dy = mc; do { mc.Property_decimalNull += 1; dy = mc; // for struct we should re-assign. 
} while ((decimal?)dy.Property_decimalNull < 0M); if ((decimal?)mc.Property_decimalNull == 0.9M) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass017.regclass017 { // <Title> Tests regular class regular property used in using expression.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System.IO; public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.myChar = (char)256; dynamic dy = mc; using (MemoryStream ms = new MemoryStream((int)dy.Property_char)) { if (ms.Capacity != 256) return 1; } return 0; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass018.regclass018 { // <Title> Tests regular class regular property used in try/catch/finally.</Title> // <Description> // try/catch/finally that uses an anonymous method and refer two dynamic parameters. // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System; public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.Property_decimalNullArr = new decimal?[] { 0M, 1M, 1.3M } ; mc.myStruct = new MyStruct() { Number = 3 } ; dynamic dy = mc; int result = -1; try { Func<decimal?[], MyStruct, int> func = delegate (decimal?[] x, MyStruct y) { int tmp = 0; foreach (decimal? d in x) { tmp += (int)d.Value; } tmp += y.Number; return tmp; } ; result = func((decimal?[])dy.Property_decimalNullArr, (MyStruct)dy.Property_MyStruct); } finally { result += (int)dy.Property_MyStruct.Number; } if (result != 8) return 1; return 0; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass019.regclass019 { // <Title> Tests regular class regular property used in foreach.</Title> // <Description> // foreach inside a using statement that uses the dynamic introduced by the using statement. // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System.IO; public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { int[] array = new int[] { 1, 2 } ; MemoryStream ms = new MemoryStream(new byte[] { 84, 101, 115, 116 } ); //Test MemberClass mc = new MemberClass(); mc.Property_dynamic = true; dynamic dy = mc; string result = string.Empty; using (dynamic sr = new StreamReader(ms, (bool)dy.Property_dynamic)) { foreach (int s in array) { ms.Position = 0; string m = ((StreamReader)sr).ReadToEnd(); result += m + s.ToString(); } } //Test1Test2 if (result == "Test1Test2") return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass020.regclass020 { // <Title> Tests regular class regular property used in iterator that calls to a lambda expression.</Title> // <Description> // foreach inside a using statement that uses the dynamic introduced by the using statement. 
// </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System; using System.Collections; public class Test { private static MemberClass s_mc; private static dynamic s_dy; static Test() { s_mc = new MemberClass(); } [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { decimal index = 1M; s_mc.Property_decimalArr = new decimal[] { 1M, 2M, 3M } ; s_dy = s_mc; Test t = new Test(); foreach (decimal i in t.Increment(0)) { if (i != index) return 1; index = index + 1; } if (index != 4) return 1; return 0; } public IEnumerable Increment(int number) { while (number < s_mc.Property_decimalArr.Length) { Func<decimal[], decimal> func = (decimal[] x) => x[number++]; yield return func(s_dy.Property_decimalArr); } } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass021.regclass021 { // <Title> Tests regular class regular property used in object initializer inside a collection initializer.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System.Collections.Generic; public class Test { private float _field1; private float?[] _field2; [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.Property_Float = 1.23f; mc.Property_FloatNullArr = new float?[] { null, 1.33f } ; dynamic dy = mc; List<Test> list = new List<Test>() { new Test() { _field1 = dy.Property_Float, _field2 = dy.Property_FloatNullArr } } ; if (list.Count == 1 && list[0]._field1 == 1.23f && list[0]._field2.Length == 2 && list[0]._field2[0] == null && list[0]._field2[1] == 1.33f) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass022.regclass022 { // <Title> Tests regular class regular property used in static method body.</Title> // <Description> // set only property access. 
// </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; dy.Property_MyClass = new MyClass() { Field = -1 } ; mc = dy; //to circumvent the boxing of the struct if (mc.myClass.Field == -1) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass023.regclass023 { // <Title> Tests regular class regular property used in static method body.</Title> // <Description> // Negative: set only property access // </Description> // <RelatedBugs></RelatedBugs> // <Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; dy.Property_MyClass = new MyClass() { Field = -1 } ; mc = dy; //to circumvent the boxing of the struct if (mc.myClass.Field != -1) return 1; return 0; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass024.regclass024 { // <Title> Tests regular class regular property used in throws.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> using System; public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; dy.Property_string = "Test Message"; try { throw new ArithmeticException((string)dy.Property_string); } catch (ArithmeticException ae) { if (ae.Message == "Test Message") return 0; } return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass025.regclass025 { // <Title> Tests regular class regular property used in field initializer.</Title> // <Description> // Negative // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { private static MemberClass s_mc; private static dynamic s_dy; private MyEnum _me = s_dy.Property_MyEnum; static Test() { s_mc = new MemberClass(); s_dy = s_mc; s_dy.Property_MyEnum = MyEnum.Third; } [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test t = new Test(); if (t._me != MyEnum.Third) return 1; return 0; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass026.regclass026 { // <Title> Tests regular class regular property used in set only property body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { private MemberClass _mc; private MyEnum? MyProp { set { _mc = new MemberClass(); dynamic dy = _mc; dy.Property_MyEnumNull = value; _mc = dy; // for struct. 
} } [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test t = new Test(); t.MyProp = MyEnum.Second; if (t._mc.myEnumNull == MyEnum.Second) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass027.regclass027 { // <Title> Tests regular class regular property used in read only property body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { private MyEnum MyProp { get { dynamic dy = new MemberClass(); dy.Property_MyEnumArr = new MyEnum[] { MyEnum.Second, default (MyEnum)} ; return dy.myEnumArr[0]; } } [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { Test t = new Test(); if (t.MyProp == MyEnum.Second) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass028.regclass028 { // <Title> Tests regular class regular property used in static read only property body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { private static MyEnum? MyProp { get { dynamic dy = new MemberClass(); dy.Property_MyEnumNullArr = new MyEnum?[] { null, MyEnum.Second, default (MyEnum)} ; return dy.myEnumNullArr[0]; } } [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { if (Test.MyProp == null) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass029.regclass029 { // <Title> Tests regular class regular property used in static method body.</Title> // <Description> // get only property access. // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.myStructNull = new MyStruct() { Number = int.MinValue } ; dynamic dy = mc; MyStruct? 
result = dy.Property_MyStructNull; if (result.Value.Number == int.MinValue) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass030.regclass030 { // <Title> Tests regular class regular property used in method call argument.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.myStructArr = new MyStruct[] { new MyStruct() { Number = 1 } , new MyStruct() { Number = -1 } } ; dynamic dy = mc; bool result = TestMethod(1, string.Empty, (MyStruct[])dy.Property_MyStructArr); if (result) return 0; return 1; } private static bool TestMethod<V, U>(V v, U u, params MyStruct[] ms) { if (v.GetType() != typeof(int)) return false; if (u.GetType() != typeof(string)) return false; if (ms.Length != 2 || ms[0].Number != 1 || ms[1].Number != -1) return false; return true; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass031.regclass031 { // <Title> Tests regular class regular property used in static method body.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; mc.myStructNullArr = new MyStruct?[] { null, new MyStruct() { Number = -1 } } ; if (((MyStruct?[])dy.Property_MyStructNullArr)[0] == null) return 0; return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass032.regclass032 { // <Title> Tests regular class regular property used in lock expression.</Title> // <Description> // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test : MemberClass { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); dynamic dy = mc; dy.Property_shortNull = (short)-1; try { lock (dy.Property_shortNull) { } } catch (Microsoft.CSharp.RuntimeBinder.RuntimeBinderException e) { if (ErrorVerifier.Verify(ErrorMessageId.BadProtectedAccess, e.Message, "MemberClass.Property_shortNull", "MemberClass", "Test")) return 0; } return 1; } } //</Code> } namespace ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass034.regclass034 { // <Title> Tests regular class regular property used in foreach loop.</Title> // <Description> // Negative // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { [Fact] public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { MemberClass mc = new MemberClass(); mc.Property_shortArr = new short[] { 1, 2, 3, 4, 5 } ; dynamic dy = mc; short i = 1; try { foreach (var x in dy.Property_shortArr) //protected { if (i++ != (short)x) return 1; } } catch (Microsoft.CSharp.RuntimeBinder.RuntimeBinderException e) { if (ErrorVerifier.Verify(ErrorMessageId.InaccessibleGetter, e.Message, "MemberClass.Property_shortArr")) return 0; } return 1; } } //</Code> } namespace 
ManagedTests.DynamicCSharp.Conformance.dynamic.context.property.regproperty.regclass.regclass035.regclass035 { // <Title> Tests regular class regular property used in method body.</Title> // <Description> // Negative // </Description> // <RelatedBugs></RelatedBugs> //<Expects Status=success></Expects> // <Code> public class Test { public static void DynamicCSharpRunTest() { Assert.Equal(0, MainMethod()); } public static int MainMethod() { return Test.TestGetMethod(new MemberClass()) + Test.TestSetMethod(new MemberClass()) == 0 ? 0 : 1; } public static int TestGetMethod(MemberClass mc) { dynamic dy = mc; mc.Property_ulongNullArr = new ulong?[] { null, 1 } ; if (dy.Property_ulongNullArr.Length == 2 && dy.Property_ulongNullArr[0] == null && dy.Property_ulongNullArr[1] == 1) return 0; else return 1; } public static int TestSetMethod(MemberClass mc) { dynamic dy = mc; dy.Property_ulongNullArr = new ulong?[] { null, 1 } ; if (mc.Property_ulongNullArr.Length == 2 && mc.Property_ulongNullArr[0] == null && mc.Property_ulongNullArr[1] == 1) return 0; else return 1; } } //</Code> }
-1
dotnet/runtime
66,208
JIT: add OSR patchpoint strategy, inhibit tail duplication
Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
AndyAyersMS
2022-03-04T19:22:14Z
2022-03-06T16:26:44Z
6fce82cb7b111ba9a2547b70b4592cea98ae314e
f9da3db92420d15b5bba283a44271cd81d83ad1a
JIT: add OSR patchpoint strategy, inhibit tail duplication. Two changes for OSR: * add new strategies for placing patchpoints -- either at backedge sources (instead of targets) or adaptively, depending on the number of backedges. Change the default to adaptive, since this works better with the flow we see from C# `for` loops. * inhibit tail duplication for OSR, as it may end up interfering with loop recognition. We may not be able to place patchpoints at sources for various reasons; if so, we fall back to placing them at targets.
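The record above describes the placement strategies only in prose. As a rough, hedged sketch of the adaptive idea (the type names, the Choose method, and the threshold of 4 below are illustrative assumptions, not the actual RyuJIT implementation), the decision reduces to preferring backedge-source placement while the backedge count is small, and falling back to target placement otherwise:

using System;

// Hypothetical illustration only: the enum, threshold, and method below are not
// RyuJIT code; they sketch the "adaptive" placement idea from the record above.
public enum PatchpointPlacement
{
    BackedgeTargets,   // fallback: a patchpoint at each backedge target (loop head)
    BackedgeSources    // preferred when cheap: a patchpoint at each backedge source
}

public static class PatchpointPolicy
{
    // Assumed cutoff; with many backedges, per-source patchpoints get costly.
    private const int MaxSourcePatchpoints = 4;

    public static PatchpointPlacement Choose(int backedgeCount, bool sourcesPlaceable)
    {
        // If the backedge sources cannot be instrumented (for whatever reason),
        // fall back to target placement, mirroring the fallback described above.
        if (!sourcesPlaceable)
            return PatchpointPlacement.BackedgeTargets;

        // Adaptive choice: prefer source placement while the backedge count is small.
        return backedgeCount <= MaxSourcePatchpoints
            ? PatchpointPlacement.BackedgeSources
            : PatchpointPlacement.BackedgeTargets;
    }

    public static void Main()
    {
        Console.WriteLine(Choose(backedgeCount: 2, sourcesPlaceable: true));   // BackedgeSources
        Console.WriteLine(Choose(backedgeCount: 9, sourcesPlaceable: true));   // BackedgeTargets
        Console.WriteLine(Choose(backedgeCount: 2, sourcesPlaceable: false));  // BackedgeTargets
    }
}

In the real JIT the choice is driven by its own flow-graph data structures and heuristics; the sketch only captures the shape of the decision.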
./src/tests/Interop/PInvoke/Variant/CMakeLists.txt
project (VariantNative)
include ("${CLR_INTEROP_TEST_ROOT}/Interop.cmake")
set(SOURCES VariantNative.cpp)
# add the executable
add_library (VariantNative SHARED ${SOURCES})

if(CLR_CMAKE_HOST_WIN32)
    list(APPEND LINK_LIBRARIES_ADDITIONAL
        OleAut32.lib
    )
endif(CLR_CMAKE_HOST_WIN32)

target_link_libraries(VariantNative ${LINK_LIBRARIES_ADDITIONAL})

# add the install targets
install (TARGETS VariantNative DESTINATION bin)
project (VariantNative)
include ("${CLR_INTEROP_TEST_ROOT}/Interop.cmake")
set(SOURCES VariantNative.cpp)
# add the executable
add_library (VariantNative SHARED ${SOURCES})

if(CLR_CMAKE_HOST_WIN32)
    list(APPEND LINK_LIBRARIES_ADDITIONAL
        OleAut32.lib
    )
endif(CLR_CMAKE_HOST_WIN32)

target_link_libraries(VariantNative ${LINK_LIBRARIES_ADDITIONAL})

# add the install targets
install (TARGETS VariantNative DESTINATION bin)
-1